use Array: all;
use SimplePrint: all;
use SimpleFibre: all;
use Math: all;
use String:{tochar};
/*
% This is the APEX stdlib.sis include file.
% Standard equates and constants for APL compiler
% Also standard coercion functions
% For SAC, we map names of system variables, system functions,
% quad, and quote-quad from APL symbols to legal SAC names
% by replacing the quad/quote-quad with QUAD or QUOTEQUAD.
% This happens in the syntax analyzer, for lack of a better
% place. We also map quad/quote-quad to function calls,
% removing the assignment. R. Bernecky 2004-06-11
*/
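/* Illustrative note (an assumption, based on the parameter names used
 * later in this file): an APL system variable such as quad-io shows up
 * here as QUADio and quad-ct as QUADct, e.g. in iotaXII(int shp, int QUADio)
 * and Dmodulo(double x, double y, double QUADct).
 */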
#define toB(x) tob((x))
#define toI(x) toi((x))
#define toD(x) tod((x))
#define toC(x) (x)
/* The toC above is wrong, but one thing at a time... */
/* Empty vectors */
#define EMPTYBOOL _drop_SxV_(1, [true])
#define EMPTYINT _drop_SxV_(1, [0])
#define EMPTYDOUBLE _drop_SxV_(1, [0d])
#define EMPTYCHAR _drop_SxV_(1, [' '])
/* end of empty array jokes */
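/* A minimal sketch of what these expand to: EMPTYINT is
 * _drop_SxV_(1, [0]), i.e. a one-element int vector with its single
 * element dropped, leaving a vector of shape [0]. The other EMPTY*
 * macros build empty vectors of the corresponding element types.
 */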
void APEXERROR(char[.] msg)
{
/* Error function. This needs work, as it should kill
* the running task, too.
*/
/*print(msg); */
return();
}
/* Structural function utility functions */
/* Ravel utility */
#define APEXRAVEL(y) (_reshape_([prod(_shape_(y))],y))
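/* Illustrative sketch: APEXRAVEL reshapes its argument to a rank-1
 * array whose length is the product of the shape, so a 2x3 matrix
 * ravels to a 6-element vector in row-major order.
 */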
inline int VectorRotateAmount(int x, int y)
{ /* Normalize x rotate for array of shape y on selected axis */
/* normalize rotation count */
if (x>0)
z = _mod_(x,y);
else
z = y - _mod_(_abs_(x),y);
return(z);
}
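/* Worked example (illustrative): for an axis of length y = 5,
 * VectorRotateAmount( 2, 5) == _mod_(2,5) == 2, and
 * VectorRotateAmount(-1, 5) == 5 - _mod_(_abs_(-1),5) == 4,
 * so a rotate by -1 is treated as a rotate by 4 in the same direction.
 */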
inline bool SameShape(int[*] x, int[*] y)
{ /* Predicate: true iff arrays x and y have the same shape */
if (_dim_(x) != _dim_(y))
z = false;
else
z = with ([0] <= i < [_dim_(y)])
fold(dandBBB, true,
(_shape_(x))[i] == (_shape_(y))[i]);
return(z);
}
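/* Usage sketch (illustrative): SameShape([1,2,3], [4,5,6]) is true,
 * since both arguments have shape [3]; SameShape of a 3-element vector
 * and a 1x3 matrix is false because the ranks already differ.
 */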
/* First-axis catenate, stolen from NTCtemplates_array.mac */
/**
*
* @fn [*] psel( int[.] idx, [*] Array)
*
* @brief selects the subarray of Array at position idx, provided
* shape( idx)[[0]] <= dim( Array) holds.
*
******************************************************************************/
#define PSEL( a) \
inline \
a[*] psel( int[.] idx, a[*] array) \
{ \
new_shape = _drop_SxV_( _sel_( [0], _shape_(idx)), _shape_(array)); \
res = with( . <= iv <= .) { \
new_idx = _cat_VxV_( idx, iv); \
} genarray( new_shape, _sel_(new_idx, array), zero( array)); \
return( res); \
}
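/* Usage sketch (illustrative, m is a hypothetical value): for a 2x3
 * int matrix m, psel([0], m) selects row 0 (a 3-element vector) and
 * psel([1,2], m) selects the scalar m[[1,2]]; the result shape is
 * _drop_SxV_(_sel_([0],_shape_(idx)), _shape_(array)).
 */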
/* Indexing helper functions */
inline int[*] indrfr(int fr, int[*] i, int[+] X)
{ /* X[;;;i;;;], where i has fr semicolons to its left */
/* Indexing is origin-0. Invocation will correct this */
/* This could stand some optimization, perhaps, for boolean i,
* unless SAC avoids building an array-valued temp of toI(i).
*/
cellshape = _shape_(i)++_drop_SxV_(fr+1,_shape_(X));
cell = genarray(cellshape,0); /* not used, but SAC needs help */
frameshape = _take_SxV_(fr,_shape_(X));
z = with (. <= iv <= .)
genarray(frameshape,indraxis0(i,psel(iv,X)),cell);
zshape = frameshape++cellshape;
return(_reshape_(zshape,z));
}
inline int[*] indraxis0(int[*] i, int[*] X)
{ /* Helper function for indexing along leading axis */
cellshape = _drop_SxV_(1,_shape_(X));
cell = genarray(cellshape, 0);
raveli = APEXRAVEL(i);
z = with (. <= iv <= .)
genarray(_shape_(raveli), psel([raveli[iv]],X),cell);
z = _reshape_(_shape_(i)++cellshape,z);
return(z);
}
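/* Usage sketch (illustrative): with fr == 1, indrfr(1, i, X) computes
 * the origin-0 APL selection X[;i]. For a 2x3 matrix X and i = [2,0]
 * the result is the 2x2 matrix of columns 2 and 0 of X: the leading
 * (frame) axes are kept and the indexed axis is replaced by _shape_(i).
 */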
#define APEXMIOTA(N) with (. <= [iv] <= .) genarray([N], iv)
#define BUILT_IN( fun) \
fun( int) \
fun( float) \
fun( double) \
fun( bool) \
fun( char)
BUILT_IN( PSEL)
#define CAT( a) \
inline \
a[*] (++)( a[+] arr_a, a[+] arr_b) \
{ \
new_shp = _modarray_( _shape_( arr_a), \
[0], \
_add_SxS_( _sel_([0], _shape_( arr_a)), \
_sel_([0], _shape_( arr_b)) ) ); \
res = with( . <= iv < _shape_( arr_a)) \
genarray( new_shp, _sel_( iv, arr_a)); \
offset = _modarray_( _mul_SxA_( 0, new_shp), \
[0], \
_sel_([0], _shape_( arr_a)) ); \
res = with( offset <= iv <= .) \
modarray( res, iv, _sel_( _sub_AxA_( iv, offset), arr_b)); \
return( res); \
}
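/* Usage sketch (illustrative): (++) catenates along the first axis,
 * so [1,2,3] ++ [4,5] has shape [5], and a 2x3 matrix ++ a 1x3 matrix
 * has shape [3,3]. The arguments must agree on all axes but axis 0;
 * no conformance check is performed here.
 */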
/* scalar take matrix, adapted from NTCtemplates_array.mac */
#define APLTAKE(a) \
inline \
a[*] take( int v, a[*] array) \
{ \
v2 = _shape_(array); \
v2 = modarray(v2,[0],v); \
return( take(v2,array)); \
}
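/* Usage sketch (illustrative): take(2, m) on a 4x3 matrix m becomes
 * take([2,3], m), i.e. the scalar left argument applies to the first
 * axis only and the remaining axes are taken in full.
 */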
#define APEXPI 3.1415926535897932d
#define APEXE 2.718281828d
/* APEXPINFINITYI largest integer */
#define APEXPINFINITYD 1.7976931348623156D308
/* APEXMINFINITYI smallest integer */
#define APEXMINFINITYD -1.7976931348623156D308
inline bool[+] tob (bool[+] x)
{ z = with (. <= iv <= .)
genarray(_shape_(x),tob(_sel_(iv,x)));
return(z);
}
inline bool[+] tob (int[+] x)
{ z = with (. <= iv <= .)
genarray(_shape_(x),tob(_sel_(iv,x)));
return(z);
}
inline bool[+] tob (double[+] x)
{ z = with (. <= iv <= .)
genarray(_shape_(x),tob(_sel_(iv,x)));
return(z);
}
inline bool tob(bool x)
{ return(x);
}
inline bool tob(int x)
{ if (1 == _toi_S_(x))
z = true;
else
z = false;
return(z);
}
inline bool tob(double x)
{ if (1 == _toi_S_(x))
z = true;
else
z = false;
return(z);
}
/*
% Floating-point utilities
% This taken from page 93 of the 1993-01-06 version
% of Committee Draft 1 of the Extended ISO APL Standard
% NO {quad}ct support yet!
*/
#define Dsignum(p) (if p = 0.0d0 then 0 elseif p<0.0d0 then -1 else 1 end if)
#define Isignum(p) (if p = 0 then 0 elseif p<0 then -1 else 1 end if)
#define Dmod(p,q) fmod(tod(p),tod(q))
#define Imod(p,q) _mod_(p,q)
/*
% 1996-12-07 Pearl Harbor Day. Try to fix bug in DBank infdivm
% benchmark. (4 5 rho 6) + rank 1 (4 1 rho 7) introduces singleton
% vectors into scalar fn calls.
% Hence, we need support for singletons within scalar fns.
% There is a similar, but independent, bug in rank support for
% the extension case.
*/
/* vector-scalar simple search loops
* This is origin-0 x1 iota y0
 * cfn is the comparator function type suffix to use, e.g., b, i, d, c.
*/
#ifdef BROKEN
#define FindFirst(x,y,cfn,QUADct) \
sx = _shape_(x)[0]; \
z = sx; /* if not found */ \
for(i=0; i<sx; i++) { /* search loop; body missing, macro disabled (BROKEN) */ \
}
#endif
/* Transpose utilities */
#define APEXTRANSPOSE(TYPE) \
inline TYPE[.,.] APEXTranspose(TYPE[.,.] y) \
{ /* Rank-2 transpose */ \
return({[i,j] -> y[[j,i]]}); \
} \
inline TYPE[.,.,.] APEXTranspose(TYPE[.,.,.] y) \
{ /* Rank-3 transpose */ \
return({[i,j,k] -> y[[k,j,i]]}); \
} \
inline TYPE[.,.,.,.] APEXTranspose(TYPE[.,.,.,.] y) \
{ /* Rank-4 transpose */ \
return({[i,j,k,l] -> y[[l,k,j,i]]}); \
} \
inline TYPE[.,.,.,.,.] APEXTranspose(TYPE[.,.,.,.,.] y) \
{ /* Rank-5 transpose */ \
return({[i,j,k,l,m] -> y[[m,l,k,j,i]]}); \
} \
inline TYPE[.,.,.,.,.,.] APEXTranspose(TYPE[.,.,.,.,.,.] y) \
{ /* Rank-6 transpose */ \
return({[i,j,k,l,m,n] -> y[[n,m,l,k,j,i]]}); \
} \
inline TYPE[.,.,.,.,.,.,.] APEXTranspose(TYPE[.,.,.,.,.,.,.] y) \
{ /* Rank-7 transpose */ \
return({[i,j,k,l,m,n,o] -> y[[o,n,m,l,k,j,i]]}); \
}
APEXTRANSPOSE(bool)
APEXTRANSPOSE(int)
APEXTRANSPOSE(double)
APEXTRANSPOSE(char)
/* End of transpose utilities */
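/* Usage sketch (illustrative): APEXTranspose reverses the axis order,
 * so for a 2x3 matrix y the result z has shape [3,2] with
 * z[[j,i]] == y[[i,j]]; for rank 3, z[[k,j,i]] == y[[i,j,k]], and so on.
 */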
/* modulus functions */
inline int Dmodulo(int x, int y, double QUADct)
{
if (0 == x)
z = 0;
else
z = Imod(y,x);
return(z);
}
inline double Dmodulo(double x, double y, double QUADct)
{
/* See page 93 of the 1993-01-06 version
of Committee Draft 1 of the Extended ISO APL Standard
THIS NEEDS WORK TO SUPPORT QUADct!!!!
*/
if (0.0d == x)
z = 0.0d;
else
z = Dmod(y,x);
return(z);
}
/* End of boilerplate */
/*
% Dyadic Scalar Function kernel macro definitions
% Function names are defined as:
% {valence, jsymbol, compute type, result type}
% Having result type available will let us support things like
% int*int where we know result will be integer, rather than being
% required to support double_real result. 1996-05-05
*/
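/* Reading example (as the macro bodies below suggest): the three
 * trailing letters name the left-argument, right-argument, and result
 * types, so dplusBDD is dyadic plus on a bool left and double right
 * argument with a double result, hence its body coerces the left
 * argument: (tod(XV)+YV).
 */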
/* x plus y */
#define dplusBBI(XV,YV) (toi(XV)+toi(YV))
#define dplusBII(XV,YV) (toi(XV)+YV)
#define dplusIBI(XV,YV) (XV+toi(YV))
#define dplusIII(XV,YV) (XV+YV)
#define dplusDDD(XV,YV) (XV+YV)
#define dplusBDD(XV,YV) (tod(XV)+YV)
#define dplusDBD(XV,YV) (XV+tod(YV))
#define dplusIDD(XV,YV) (tod(XV)+YV)
#define dplusDID(XV,YV) (XV+tod(YV))
/* x minus y */
#define dbarBBI(XV,YV) (toi(XV)-toi(YV))
#define dbarIBI(XV,YV) (XV-toi(YV))
#define dbarBII(XV,YV) (toi(XV)-YV)
#define dbarIII(XV,YV) (XV-YV)
#define dbarIDD(XV,YV) (tod(XV)-YV)
#define dbarDID(XV,YV) (XV-tod(YV))
#define dbarDDD(XV,YV) (XV-YV)
#define dbarBDD(XV,YV) (tod(XV)-YV)
#define dbarDBD(XV,YV) (XV-tod(YV))
/* x times y */
#define dmpyBBB(XV,YV) (XV&YV)
#define dmpyIII(XV,YV) (XV*YV)
#define dmpyDDD(XV,YV) (XV*YV)
#define dmpyBII(XV,YV) (toi(XV)*YV)
#define dmpyIBI(XV,YV) (XV*toi(YV))
#define dmpyBDD(XV,YV) (tod(XV)*YV)
#define dmpyDBD(XV,YV) (XV*tod(YV))
#define dmpyDID(XV,YV) (XV*tod(YV))
#define dmpyIDD(XV,YV) (tod(XV)*YV)
/* x divided by y */
inline double APEX_DIVIDEDD (double X, double Y)
{
if (X == Y)
z = 1.0d;
else
z = X/Y;
return(z);
}
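/* Worked example (illustrative): APEX_DIVIDEDD(0.0d, 0.0d) takes the
 * X == Y branch and returns 1.0d, so 0/0 is defined as 1 rather than
 * producing a divide error; for X != Y the ordinary quotient X/Y is
 * returned.
 */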
#define ddivBDD(XV,YV) (APEX_DIVIDEDD(XV,YV))
#define ddivIDD(XV,YV) (APEX_DIVIDEDD(XV,YV))
#define ddivDDD(XV,YV) (APEX_DIVIDEDD(XV,YV))
#define ddivBID(XV,YV) (APEX_DIVIDEDD(XV,YV))
#define ddivIID(XV,YV) (APEX_DIVIDEDD(XV,YV))
#define ddivDID(XV,YV) (APEX_DIVIDEDD(XV,YV))
#define ddivBBD(XV,YV) (APEX_DIVIDEDD(XV,YV))
#define ddivIBD(XV,YV) (APEX_DIVIDEDD(XV,YV))
#define ddivDBD(XV,YV) (APEX_DIVIDEDD(XV,YV))
/* x min y */
inline int APEX_MINII (int x, int y)
{
if (x <= y)
z = x;
else
z = y;
return (z);
}
inline char APEX_MINCC (char x, char y)
{
if (x <= y)
z = x;
else
z = y;
return (z);
}
inline double APEX_MINDD (double x, double y)
{
if (x <= y)
z = x;
else
z = y;
return (z);
}
#define dminBBB(XV,YV) (XV&YV)
#define dminIII(XV,YV) (APEX_MINII(XV,YV))
#define dminDDD(XV,YV) (APEX_MINDD(XV,YV))
#define dminCCC(XV,YV) (APEX_MINCC(XV,YV))
#define dminBII(XV,YV) (APEX_MINII(toi(XV),YV))
#define dminIBI(XV,YV) (APEX_MINII(XV,toi(YV)))
#define dminBDD(XV,YV) (APEX_MINDD(tod(XV),YV))
#define dminDBD(XV,YV) (APEX_MINDD(XV,tod(YV)))
#define dminIDD(XV,YV) (APEX_MINDD(tod(XV),YV))
#define dminDID(XV,YV) (APEX_MINDD(XV,tod(YV)))
/* NB. max and min extended to characters */
inline int APEX_MAXII (int x, int y)
{
if (x >= y) z = x;
else z = y;
return (z);
}
inline char APEX_MAXCC (char x, char y)
{
if (x >= y) z = x;
else z = y;
return (z);
}
inline double APEX_MAXDD (double x, double y)
{
if (x >= y)
z = x;
else
z = y;
return (z);
}
/* x max y */
#define dmaxBBB(XV,YV) (XV|YV)
#define dmaxIII(XV,YV) (APEX_MAXII(XV,YV))
#define dmaxDDD(XV,YV) (APEX_MAXDD(XV,YV))
#define dmaxCCC(XV,YV) (APEX_MAXCC(XV,YV))
#define dmaxBII(XV,YV) (APEX_MAXII(toi(XV),YV))
#define dmaxIBI(XV,YV) (APEX_MAXII(XV,toi(YV)))
#define dmaxBDD(XV,YV) (APEX_MAXDD(tod(XV),YV))
#define dmaxDBD(XV,YV) (APEX_MAXDD(XV,tod(YV)))
#define dmaxIDD(XV,YV) (APEX_MAXDD(tod(XV),YV))
#define dmaxDID(XV,YV) (APEX_MAXDD(XV,tod(YV)))
/* x mod y */
#define dmodBBB(XV,YV,QUADct) ((~XV)&YV)
#define dmodIII(XV,YV,QUADct) (Dmodulo(XV,YV,QUADct))
#define dmodDDD(XV,YV,QUADct) (Dmodulo(XV,YV,QUADct))
/* x star y */
#define dstarBBB(XV,YV) (XV|~YV)
#define dstarIDD(XV,YV) (exp(XV,YV))
#define dstarDDD(XV,YV) (exp(XV,YV))
/* The dstari fragment will not work in integer type because EmitDyadicScalarFns
% (and everyone else!) uses lhtype,rhtype to compute fragment
% type. This fails for general star because we do not know that
% the result type is going to be double_real, as we can not
% ascertain that the right argument is positive.
% Do it the slow way for now... 1995-11-18
% Actually, now that we have predicates, we can do better! 1996-04-30
*/
#define dlogBDD(XV,YV) (log(YV)/log(XV))
#define dlogIDD(XV,YV) (log(YV)/log(XV))
#define dlogDDD(XV,YV) (log(YV)/log(XV))
/* NB. Extension of ISO APL to allow comparison of characters */
/* relationals */
#define dltBBB(XV,YV,QUADct) ((~XV)&YV)
#define dltIBB(XV,YV,QUADct) (XV<toi(YV))
#define dltIIB(XV,YV,QUADct) (XV<YV)
#define dltDDB(XV,YV,QUADct) (XV<YV)
#define dltCCB(XV,YV,QUADct) (XV<YV)
#define dneIIB(XV,YV,QUADct) (XV!=YV)
#define dgtIIB(XV,YV,QUADct) (XV>YV)
#define dgtDDB(XV,YV,QUADct) (XV>YV)
#define dgtCCB(XV,YV,QUADct) (XV>YV)
#define dgeBBB(XV,YV,QUADct) (XV|~YV)
#define dgeIIB(XV,YV,QUADct) (XV>=YV)
#define dgeDDB(XV,YV,QUADct) (XV>=YV)
#define dgeCCB(XV,YV,QUADct) (XV>=YV)
/* As of 2004-09-16, we don't support lcm/gcd. Needs work
in code generator and dfa to support side effects in scan, etc.
rbe
*/
inline bool dandBBB(bool x, bool y)
{ /* Boolean and */
z = x && y;
return(z);
}
/* Euclid's algorithm for lcm */
#define dandII(XV,YV) (for initial ax := abs(XV); ay := abs(YV); \
u = min(ax,ay); v := max (ax,ay); \
while (v ~= 0) repeat \
v := mod(old u,old v); \
u := old v; \
returns value of (ax*ay)/u \
end for)
/* Euclid's algorithm for lcm */
#define dandDD(XV,YV) (for initial ax := abs(XV); ay := abs(YV); \
u = min(ax,ay); v := max (ax,ay); \
while (v ~= 0) repeat \
v := mod(old u,old v); \
u := old v; \
returns value of (ax*ay)/u \
end for)
#define dorBBB(XV,YV) (XV||YV)
/* Euclid's algorithm for gcd */
#define dorII(XV,YV) (for initial \
ax := abs(XV); ay := abs(YV); \
u = min(ax,ay); v := max (ax,ay); \
while (v ~= 0) repeat \
v := mod(old u,old v); \
u := old v; \
returns value of u \
end for)
/* Euclid's algorithm for gcd */
#define dorDDD(XV,YV) (for initial \
ax := abs(XV); ay := abs(YV); \
u = min(ax,ay); v := max (ax,ay); \
while (v ~= 0) repeat \
v := mod(old u,old v); \
u := old v; \
returns value of u \
end for)
#define dnandBBB(XV,YV) (~(XV&YV))
#define dnorBBB(XV,YV) (~(XV|YV))
#define dcircDDD(XV,YV) (if (XV = 1.0d) then sin(YV) \
elseif (XV = 2.0d) then cos(YV) \
elseif (XV = 3.0d) then tan(YV) \
elseif (XV = 4.0d) then exp((1.0d+YV*YV),0.5d) \
else error[double_real] end if)
/* domain error check above */
/* 1 circle */
#define dcirc1DDD(XV,YV) (sin(YV))
/* 2 circle */
#define dcirc2DDD(XV,YV) (cos(YV))
/* 3 circle */
#define dcirc3DDD(XV,YV) (tan(YV))
/* 4 circle */
#define dcirc4DDD(XV,YV) (exp((1.0d+YV*YV),0.5d))
inline bool[+] neIIB(int[+] x, int[+] y,double QUADct)
{ /* AxA Dyadic scalar fn, shapes unknown, shapes may or may not match */
/* Insert length error checking code here!! */
z = with( . <= iv <= .) {
xel = toI(_sel_(iv,x));
yel = toI(_sel_(iv,y));
}
genarray( _shape_(x),
dneIIB(xel,yel, QUADct));
return(z);
}
inline int plusBIIsl(bool x, int y)
{ /* SxS dyadic scalar fn, shapes match */
z = dplusIII(toI(x),toI(y));
return(z);
}
inline int[*] rotrIII(int x, int[*] y)
{ /* Scalar rotate matrix last axis */
ix = toi(x);
k = VectorRotateAmount(ix,_shape_(y)[0]); /* Normalize rotate count */
/* Stupid sac rotate primitive goes in wrong direction */
z = rotate(_dim_(y)-1, -k, y);
return (z);
}
inline int[.] iotaXII(int shp, int QUADio)
{ /* Index generator on scalar */
/* HELP! Needs domain check for negative shp */
res = with( . <= [i] <= .)
genarray( [toI(shp)], i+QUADio);
return( res);
}
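/* Worked example (illustrative): iotaXII(4, 0) yields [0,1,2,3] and
 * iotaXII(4, 1) yields [1,2,3,4]; QUADio is simply added to each
 * origin-0 index.
 */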
inline int[.] iotaXIINonNegsy(int shp, int QUADio)
{ /* Index generator on ScalarN when N is non-negative integer */
res = with( . <= [i] <= .)
genarray( [toI(shp)], i+QUADio);
return( res);
}
inline int[.] ugrdXII(int[.] y, int QUADio)
{ /* Upgrade on integer vector.
Do APL upgrade of vector y using heapsort.
This version from "Numerical Recipes in C", p. 249
Robert Bernecky 2005-11-17
Knuth, Vol. III, pp. 145-148 gives a good example.
APL model: (See workspace apex2003/wss/upgrade or
apex2003/wif/upgrade)
r{<-}upgradeHeap v;#io;N;qio;heap
@ Upgrade vector using heapsort
@ Heap stored using origin-1 indices, to ease computation
@ of child indices.
@ We fix it when we're done...
#io{<-}0
qio{<-}1 @ The fake quad-io
N{<-}{rho}v
:if N{<=}1
r{<-}{iota}N
:else
heap{<-}MakeHeap(v)
r{<-}(UnHeap(heap))-qio
:endif
*/
qio=1; /* Heapsort is sort of origin-1 */
N = _shape_(y)[0];
if (N <= 1)
z = APEXMIOTA(N);
else{
z = MakeHeap(y);
z = (UnHeap(z,y))-qio;
}
return(z+QUADio);
}
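/* Worked example (illustrative): in origin 0 the upgrade of
 * [30,10,20] is [1,2,0], the permutation that orders the argument
 * ascending (elements 10, 20, 30); ugrdXII adds QUADio to that
 * permutation before returning it.
 */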
/*
r{<-}MakeHeap v;L;heap;ir;indxt;q;N
@ Build heap from v
@ We know v has at least two elements
N{<-}{rho}v
heap{<-}qio+{iota}N
ir{<-}N
:for L :in {reverse}1{drop}{iota}({floor}N{divide}2)+1
indxt{<-}heap[L-qio]
q{<-}v[indxt-qio]
@@#{<-}'MakeHeap on: ',q,' at L=',L, ' with indxt=',indxt
heap{<-}heap Heapness L,ir,q,indxt
:endfor
r{<-}heap
*/
inline int[.] MakeHeap(int[.] v)
{ /* Build heap from vector v. v has at least two elements */
N = _shape_(v)[0];
qio = 1;
heap = qio+APEXMIOTA(N);
ir=N;
max= 1+N/2;
for(L=max-1; L>0; L--){
indxt=heap[L-qio];
q=v[indxt-qio];
heap = Heapness(L,ir,q,indxt,heap,v);
}
return(heap);
}
inline int[.] UnHeap(int[.] heap, int[.]v)
{ /* Extract heap elements in top-to-bottom order */
/*
r{<-}UnHeap heap;ir;indxt;q;h
@ Extract elements from heap in top-to-bottom order;
@ Restore heap invariant, then place extracted element
@ at end of now-shortened heap
@ heap has at least two elements
h{<-}{reverse}1+qio+{iota}({rho}heap)-2
:for ir :in h
indxt{<-}heap[ir] @ Extract top item
q{<-}v[indxt-qio]
heap[ir]{<-}heap[0]
heap{<-}heap Heapness qio,ir,q,indxt
:endfor
h{<-}heap[+0 1] & heap[0 1]{<-}{reverse}h
@@ LENERR APL+Linux!!! heap[0 1]{<-}heap[1 0]
r{<-}heap
*/
qio=1;
for(ir=_shape_(heap)[0]-1; ir>=2; ir--){
indxt=heap[ir];
q=v[indxt-qio];
heap[ir]=heap[0];
heap=Heapness(qio,ir,q,indxt,heap,v);
}
t = heap[0]; /* This doesn't look kosher to me... */
heap[0]=heap[1];
heap[1]=t;
return(heap);
}
inline int[.] Heapness(int L, int ir, int q, int indxt, int[.] heap, int[.] v)
{ /* Restore heap invariant: For Origin-1 a[i], i member 1...N,
a[i/2]>=a[i].
APL Model:
r{<-}heap Heapness Liqt;L;ir;q;indxt;P;C;newC;lsibp;lsib;rsibp;rsib;bigsibp;bigsib
@ Restore heap ordering invariant
@@@#{<-}'heap values are: ',v[heap-qio]
@@@#{<-}'Liqt=',Liqt,' heap[indxt]=', heap[Liqt[3]-qio]
L{<-} Liqt[0] @ Top node to adjust
ir{<-} Liqt[1] @ Current heap size
q{<-} Liqt[2] @ Parent value
indxt{<-}Liqt[3] @ Parent node
@
P{<-}L @ Parent node
C{<-}P+P @ Child node
:while C{<=}ir
@ Find larger sibling index into v
:if C{>=}ir @ Fell off bottom of heap
newC{<-}C
:else
lsibp{<-}heap[C-qio]
lsib{<-}v[lsibp-qio]
rsibp{<-}heap[C+qio-1]
rsib{<-}v[rsibp-qio]
:if lsib upgradeGT rsib @ Left sib bigger
newC{<-}C
:elseif lsib upgradeLT rsib @ Right sib bigger
newC{<-}C+1
:elseif lsibp<rsibp @ Tie: prefer left sib
newC{<-}C
@ ... (remainder of the APL model lost)
*/
qio = 1;
P = L; /* Parent node */
C = P+P; /* Child node */
while (C <= ir) {
if (C >= ir) /* Fell off heap */
newC=C;
else{
lsibp=heap[C-qio];
lsib=v[lsibp-qio];
rsibp=heap[C+1-qio];
rsib=v[rsibp-qio];
if (upgradeGT(lsib,rsib)) /* Left sib bigger */
newC=C;
else if (upgradeLT(lsib,rsib)) /* Right sib bigger */
newC=C+1;
else if (lsibp < rsibp) /* Tie on value: prefer left sib */
newC=C;
else
newC=C+1;
}
/* Descend while the parent value is below the bigger sib
 * (heapsort sift-down, per the APL model above); otherwise stop.
 */
bigsibp = heap[newC-qio];
bigsib = v[bigsibp-qio];
if (upgradeLT(q, bigsib)) {
heap[P-qio] = heap[newC-qio];
P = newC;
C = P+P;
}
else {
C = ir+1; /* terminate the sift-down */
}
}
heap[P-qio] = indxt;
return(heap);
}
inline bool upgradeGT(int x, int y)
{
z = x>y;
return(z);
}
inline bool upgradeLT(int x, int y)
{
z = x<y;
return(z);
}