exit(-1);
}
for (int r = 1; r <= mat1.Nrows(); r++) {
for (int c =1; c <= mat1.Ncols(); c++) {
mat1(r,c) = mat1(r,c) * mat2(r,c);
}
}
}
ReturnMatrix vox_to_vox(const ColumnVector& xyz1,const ColumnVector& dims1,const ColumnVector& dims2,const Matrix& xfm){
ColumnVector xyz1_mm(4),xyz2_mm,xyz2(3);
xyz1_mm<<xyz1(1)*dims1(1)<<xyz1(2)*dims1(2)<<xyz1(3)*dims1(3)<<1;
xyz2_mm=xfm*xyz1_mm;
xyz2_mm=xyz2_mm/xyz2_mm(4);
xyz2<<xyz2_mm(1)/dims2(1)<<xyz2_mm(2)/dims2(2)<<xyz2_mm(3)/dims2(3);
xyz2.Release();
return xyz2;
}
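// Usage sketch (editor's illustration, not part of the library): vox_to_vox maps a voxel
// coordinate in one image to a voxel coordinate in another by scaling to mm with dims1,
// applying the 4x4 mm-space affine xfm in homogeneous coordinates, and rescaling by dims2.
// Variable names below are hypothetical:
//   ColumnVector v1(3), dims1(3), dims2(3);
//   v1 << 10 << 20 << 15;     // voxel index in image 1
//   dims1 << 2 << 2 << 2;     // voxel sizes (mm) of image 1
//   dims2 << 1 << 1 << 1;     // voxel sizes (mm) of image 2
//   ColumnVector v2 = vox_to_vox(v1, dims1, dims2, xfm);   // voxel index in image 2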
ReturnMatrix mni_to_imgvox(const ColumnVector& mni,const ColumnVector& mni_origin,const Matrix& mni2img, const ColumnVector& img_dims){
ColumnVector mni_new_origin(4),img_mm;//homogeneous
ColumnVector img_vox(3);
mni_new_origin<<mni(1)+mni_origin(1)<<mni(2)+mni_origin(2)<<mni(3)+mni_origin(3)<<1;
img_mm=mni2img*mni_new_origin;
img_vox<<img_mm(1)/img_dims(1)<<img_mm(2)/img_dims(2)<<img_mm(3)/img_dims(3);
img_vox.Release();
return img_vox;
}
void remmean_econ(Matrix& mat, const int dim)
{
Matrix matmean;
remmean(mat, matmean, dim);
}
void remmean(Matrix& mat, Matrix& matmean, const int dim)
{
matmean = mean(mat,dim);
if (dim == 1){
for (int mr=1; mr<=mat.Nrows(); mr++)
mat.Row(mr) -= matmean.AsRow();
}
else{
for (int mc=1; mc<=mat.Ncols(); mc++)
mat.Column(mc) -= matmean.AsColumn();
}
}
void remmean(const Matrix& mat, Matrix& demeanedmat, Matrix& Mean, const int dim)
{
demeanedmat = mat;
remmean(demeanedmat,Mean, dim);
}
ReturnMatrix remmean(const Matrix& mat, const int dim)
{
Matrix res = mat;
remmean_econ(res,dim);
res.Release();
return res;
}
ReturnMatrix oldcov(const Matrix& mat, const int norm)
{
SymmetricMatrix res;
Matrix tmp;
int N;
tmp=remmean(mat);
if (norm == 1) {N = mat.Nrows();}
else {N = mat.Nrows()-1;}
res << tmp.t()*tmp;
res = res/N;
res.Release();
return res;
}
ReturnMatrix cov(const Matrix& data, const bool sampleCovariance, int econ)
{
//This assumes vectors are stored using column order in data
SymmetricMatrix res;
res << zeros(data.Ncols(),data.Ncols());
Matrix meanM(mean(data));
int N=data.Nrows();
if (sampleCovariance && N>1)
N--;
if ( econ < 1 )
econ=data.Nrows();
for(int startRow=1; startRow <= data.Nrows(); startRow+=econ) {
Matrix suba=data.SubMatrix(startRow,Min(startRow+econ-1,data.Nrows()),1,data.Ncols());
for (int row=1; row <= suba.Nrows(); row++)
suba.Row(row)-=meanM;
res << res + suba.t()*suba/N;
}
res.Release();
return res;
}
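// Editor's note (sketch, assuming the behaviour shown above): cov() forms the covariance
// of the columns of data (each column a variable, each row an observation); econ only
// limits how many demeaned rows are held in memory per block, so the result is the same
// for any positive block size. Illustrative call, hypothetical names:
//   SymmetricMatrix C1 = cov(data, true, 0);     // econ<1: process all rows at once
//   SymmetricMatrix C2 = cov(data, true, 100);   // same result, 100 rows per block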
ReturnMatrix cov_r(const Matrix& data, const bool sampleCovariance, int econ)
{
//This assumes vectors are stored using row order in data
SymmetricMatrix res;
res << zeros(data.Nrows(),data.Nrows());
Matrix meanM(mean(data,2));
int N=data.Ncols();
if (sampleCovariance && N>1)
N--;
if ( econ < 1 )
econ=data.Ncols();
for(int startCol=1; startCol <= data.Ncols(); startCol+=econ) {
Matrix suba=data.SubMatrix(1,data.Nrows(),startCol,Min(startCol+econ-1,data.Ncols()));
for (int col=1; col <= suba.Ncols(); col++)
suba.Column(col)-=meanM;
res << res + suba*suba.t()/N;
}
res.Release();
return res;
}
ReturnMatrix cov_r(const Matrix& data, const Matrix& weights2, int econ)
{
//This assumes vectors are stored using row order in data, weights are a single "row". No bool flag as biased vs unbiased isn't relevant here
RowVector weights=((weights2/weights2.Sum()).AsRow());
SymmetricMatrix res;
res << zeros(data.Nrows(),data.Nrows());
Matrix meanM(mean(data,weights,2));
double N=(1-weights.SumSquare());//As weights.Sum() is equal to 1
if ( econ < 1 )
econ=data.Ncols();
for(int startCol=1; startCol <= data.Ncols(); startCol+=econ) {
Matrix suba=data.SubMatrix(1,data.Nrows(),startCol,Min(startCol+econ-1,data.Ncols()));
for (int col=1; col <= suba.Ncols(); col++) {
suba.Column(col)-=meanM;
suba.Column(col)*=sqrt(weights(startCol+col-1));
}
res << res + suba*suba.t()/N;
}
res.Release();
return res;
}
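// Editor's note on the weighted variant above: with the weights normalised to sum to 1,
// the weight-scaled, demeaned outer products are divided by N = 1 - sum_i(w_i^2), the
// standard correction that makes the weighted covariance unbiased; for equal weights
// w_i = 1/n this reduces to the usual 1/(n-1) factor.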
ReturnMatrix corrcoef(const Matrix& mat, const bool norm)
{
SymmetricMatrix res;
res = cov(mat,norm);
Matrix D;
D=diag(res);
D=sqrt(D*D.t());
res << SD(res,D);
res.Release();
return res;
}
ReturnMatrix flipud(const Matrix& mat)
{
Matrix rmat(mat.Nrows(),mat.Ncols());
for(int j=1;j<=mat.Ncols();j++)
for(int i=1;i<=mat.Nrows();i++)
rmat(i,j)=mat(mat.Nrows()-i+1,j);
rmat.Release();
return rmat;
}
ReturnMatrix fliplr(const Matrix& mat)
{
Matrix rmat(mat.Nrows(),mat.Ncols());
for(int j=1;j<=mat.Ncols();j++)
for(int i=1;i<=mat.Nrows();i++)
rmat(i,j)=mat(i,mat.Ncols()-j+1);
rmat.Release();
return rmat;
}
void symm_orth(Matrix &Mat)
{
SymmetricMatrix Metric;
Metric << Mat.t()*Mat;
Metric = Metric.i();
Matrix tmpE;
DiagonalMatrix tmpD;
EigenValues(Metric,tmpD,tmpE);
Mat = Mat * tmpE * sqrt(abs(tmpD)) * tmpE.t();
}
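// Editor's note: symm_orth() performs symmetric (Loewdin) orthonormalisation, replacing
// Mat by Mat*(Mat'*Mat)^(-1/2); the inverse square root is built from the eigendecomposition
// of (Mat'*Mat)^(-1) computed above. Minimal sketch, hypothetical names:
//   Matrix M = ...;     // columns to be orthonormalised
//   symm_orth(M);       // afterwards M.t()*M is (approximately) the identity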
void powerspectrum(const Matrix &Mat1, Matrix &Result, bool useLog)
//calculates the powerspectrum for every column of Mat1
{
Matrix res;
for(int ctr=1; ctr <= Mat1.Ncols(); ctr++)
{
ColumnVector tmpCol;
tmpCol=Mat1.Column(ctr);
ColumnVector FtmpCol_real;
ColumnVector FtmpCol_imag;
ColumnVector tmpPow;
RealFFT(tmpCol,FtmpCol_real,FtmpCol_imag);
tmpPow = pow(FtmpCol_real,2) + pow(FtmpCol_imag,2); // power = Re^2 + Im^2
if(useLog){ tmpPow = log(tmpPow); }
if(res.Storage()==0){res= tmpPow;}else{res|=tmpPow;}
}
Result=res;
}
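// Usage sketch (editor's illustration): powerspectrum() returns, column by column, the
// squared FFT magnitude of each column of Mat1 (optionally log-transformed).
//   Matrix ts;              // each column is a time series
//   Matrix pspec;
//   powerspectrum(ts, pspec, false);   // pspec(k,c) = |FFT of column c at bin k|^2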
void element_mod_n(Matrix& Mat,double n)
{
//represent each element in modulo n (useful for wrapping phases (n=2*M_PI))
double tmp;
for( int i=1;i<=Mat.Nrows();i++){
for( int j=1;j<=Mat.Ncols();j++){
tmp = ( Mat(i,j) - rounddouble(Mat(i,j)/n)*n );
Mat(i,j)= tmp > 0 ? tmp : tmp + n;
}
}
}
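// Usage sketch (editor's illustration): wrapping phase values into [0, 2*pi):
//   Matrix phase = ...;
//   element_mod_n(phase, 2*M_PI);   // each element replaced by its value modulo 2*pi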
int nextpow2(int n)
{
return (int)pow(2,ceil(log(float(n))/log(float(2))));
}
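// Editor's note: nextpow2() returns the smallest power of two not less than n
// (e.g. nextpow2(300) == 512), a convenient choice of FFT zero-padding length for
// routines such as xcorr() below.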
void xcorr(const Matrix& p_ts, Matrix& ret, int lag, int p_zeropad)
{
Tracer tr("MISCMATHS::xcorr");
int sizeTS = p_ts.Nrows();
int numTS = p_ts.Ncols();
if(p_zeropad == 0)
p_zeropad = sizeTS;
if(lag == 0)
lag = sizeTS;
ColumnVector x(p_zeropad);
x = 0;
ColumnVector fft_real;
ColumnVector fft_im;
ColumnVector dummy(p_zeropad);
ColumnVector dummy2;
dummy = 0;
ColumnVector realifft(p_zeropad);
ret.ReSize(lag,numTS);
ret = 0;
for(int i = 1; i <= numTS; i++)
{
x.Rows(1,sizeTS) = p_ts.Column(i);
FFT(x, dummy, fft_real, fft_im);
for(int j = 1; j <= p_zeropad; j++)
{
// (x+iy)(x-iy) = x^2 + y^2
fft_real(j) = fft_real(j)*fft_real(j) + fft_im(j)*fft_im(j);
fft_im(j) = 0;
}
FFTI(fft_real, fft_im, realifft, dummy2);
float varx = var(x.Rows(1,sizeTS)).AsScalar();
ret.Column(i) = realifft.Rows(1,lag);
for(int j = 1; j <= lag-1; j++)
{
// Correction to make autocorr unbiased and normalised
ret(j,i) = ret(j,i)/((sizeTS-j)*varx);
}
}
}
ReturnMatrix xcorr(const Matrix& p_ts, int lag, int p_zeropad )
{
Matrix r;
xcorr(p_ts,r,lag,p_zeropad);
r.Release();
return r;
}
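// Usage sketch (editor's illustration, hypothetical names): column-wise autocorrelation
// up to `lag` samples, zero-padding each series to `p_zeropad` points before the FFT:
//   Matrix ts;                                          // time points x series
//   Matrix ac = xcorr(ts, 20, nextpow2(ts.Nrows()));    // 20 lags per column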
void detrend(Matrix& p_ts, int p_level)
{
Tracer trace("MISCMATHS::detrend");
int sizeTS = p_ts.Nrows();
// p_ts = b*a + e (OLS regression)
// e is detrended data
Matrix a(sizeTS, p_level+1);
// Create a
for(int t = 1; t <= sizeTS; t++)
{
for(int l = 0; l <= p_level; l++)
a(t,l+1) = pow((float)t/sizeTS,l);
}
Matrix R = IdentityMatrix(sizeTS)-a*pinv(a);
for(int t = 1; t <= p_ts.Ncols(); t++)
{
p_ts.Column(t) = R*p_ts.Column(t);
}
}
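// Usage sketch (editor's illustration): remove a polynomial trend of order p_level from
// every column of a time-series matrix (p_level = 1 removes the mean and linear trend):
//   Matrix ts;            // time points x series
//   detrend(ts, 1);       // ts now holds the residuals of the polynomial fit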
ReturnMatrix read_vest(string p_fname)
{
ifstream in;
in.open(p_fname.c_str(), ios::in);
if(!in) throw Exception(string("Unable to open "+p_fname).c_str());
int numWaves = 0;
int numPoints = 0;
string str;
while(true)
{
if(!in.good()) throw Exception(string(p_fname+" is not a valid vest file").c_str());
in >> str;
if(str == "/Matrix")
break;
else if(str == "/NumWaves")
{
in >> numWaves;
}
else if(str == "/NumPoints" || str == "/NumContrasts")
{
in >> numPoints;
}
}
Matrix p_mat(numPoints, numWaves);
for(int i = 1; i <= numPoints; i++)
{
for(int j = 1; j <= numWaves; j++)
{
if (!in.eof()) in >> ws >> p_mat(i,j) >> ws;
else throw Exception(string(p_fname+" has insufficient data points").c_str());
}
}
in.close();
p_mat.Release();
return p_mat;
}
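// Example of the VEST text format parsed above (illustrative): read_vest() skips
// everything before /Matrix, using /NumWaves as the number of columns and
// /NumPoints (or /NumContrasts) as the number of rows.
//   /NumWaves  2
//   /NumPoints 3
//   /Matrix
//   1.0  0.0
//   0.5  0.5
//   0.0  1.0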
void ols(const Matrix& data,const Matrix& des,const Matrix& tc, Matrix& cope,Matrix& varcope){
// ols
// data is t x v
// des is t x ev (design matrix)
// tc is cons x ev (contrast matrix)
// cope and varcope will be cons x v
// but will be resized if they are wrong
// hence may be passed in uninitialised
// TB 2004
if(data.Nrows() != des.Nrows()){
cerr <<"MISCMATHS::ols - data and design have different number of time points"<<endl;
exit(-1);
}
if(des.Ncols() != tc.Ncols()){
cerr <<"MISCMATHS::ols - design and contrast matrix have different number of EVs"<<endl;
exit(-1);
}
Matrix pdes = pinv(des);
Matrix prevar=diag(tc*pdes*pdes.t()*tc.t());
Matrix R=IdentityMatrix(des.Nrows())-des*pdes;
float tR=R.Trace();
Matrix pe=pdes*data;
cope=tc*pe;
Matrix res=data-des*pe;
Matrix sigsq=sum(SP(res,res))/tR;
varcope=prevar*sigsq;
}
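// Usage sketch (editor's illustration, hypothetical names): fit the GLM and form a
// t-statistic for each contrast and voxel from the ols() outputs:
//   Matrix cope, varcope;
//   ols(data, des, tc, cope, varcope);        // data: t x v, des: t x ev, tc: cons x ev
//   Matrix tstat = SD(cope, sqrt(varcope));   // element-wise cope / sqrt(varcope)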
float ols_dof(const Matrix& des){
if ( des.Nrows() > 4000 ) //Use the simple version as huge designs require too much RAM in the full calculation
return des.Nrows() - des.Ncols();
try {
Matrix pdes = pinv(des);
Matrix R=IdentityMatrix(des.Nrows())-des*pdes;
return R.Trace();
}
catch (...) {
cerr << "ols_dof: Error in determining the trace, resorting to basic calculation" << endl;
}
return des.Nrows() - des.Ncols();
}
int conjgrad(ColumnVector& x, const Matrix& A, const ColumnVector& b, int maxit,
float reltol)
{
// solves: A * x = b (for x)
// implementation of algorithm in Golub and Van Loan (3rd ed, page 527)
ColumnVector rk1, rk2, pk, apk;
double betak, alphak, rk1rk1=0, rk2rk2, r00=0;
int k=0;
rk1 = b - A*x; // a *big* calculation
for (int n=1; n<=maxit; n++) {
k++;
if (k==1) {
pk = rk1;
rk1rk1 = (rk1.t() * rk1).AsScalar();
r00 = rk1rk1; // initial residual norm^2, used by the relative-tolerance test below
} else {
rk2rk2 = rk1rk1; // from before
rk1rk1 = (rk1.t() * rk1).AsScalar();
if (rk2rk2<1e-10*rk1rk1) {
cerr << "WARNING:: Conj Grad - low demoninator (rk2rk2)" << endl;
if (rk2rk2<=0) {
cerr << "Aborting conj grad ..." << endl;
return 1;
}
}
betak = rk1rk1 / rk2rk2;
pk = rk1 + betak * pk; // note RHS pk is p(k-1) in algorithm
}
// stop if sufficient accuracy is achieved
if (rk1rk1<reltol*reltol*r00) return 0;
apk = A * pk; // the *big* calculation in this algorithm
ColumnVector pap = pk.t() * apk;
if (pap.AsScalar()<0) {
cerr << "ERROR:: Conj Grad - negative eigenvector found (matrix must be symmetric and positive-definite)\nAborting ... " << endl;
return 2;
} else if (pap.AsScalar()<1e-10) {
cerr << "WARNING:: Conj Grad - nearly null eigenvector found (terminating early)" << endl;
return 1;
} else {
alphak = rk1rk1 / pap.AsScalar();
}
x = x + alphak * pk; // note LHS is x(k) and RHS is x(k-1) in algorithm
rk2 = rk1; // update prior to the next step
rk1 = rk1 - alphak * apk; // note LHS is r(k) in algorithm
}
return 0;
}
int conjgrad(ColumnVector& x, const Matrix& A, const ColumnVector& b, int maxit)
{
return conjgrad(x,A,b,maxit,1e-10);
}
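// Usage sketch (editor's illustration): solve A*x = b for a symmetric positive-definite A,
// starting from an initial guess in x (here zero), with at most 100 iterations and the
// default relative tolerance:
//   ColumnVector x(b.Nrows());
//   x = 0;
//   int status = conjgrad(x, A, b, 100);   // 0 on success, non-zero on early termination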
float csevl(const float x, const ColumnVector& cs, const int n)
{
float b0 = 0;
float b1 = 0;
float b2 = 0;
const float twox=2*x;
for(int i=1; i<=n; i++)
{
b2=b1;
b1=b0;
b0=twox*b1-b2+cs(n+1-i);
}
return 0.5*(b0-b2);
}
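// Editor's note: csevl() evaluates the n-term Chebyshev series with coefficients cs at x
// using Clenshaw's recurrence b_k = 2*x*b_{k+1} - b_{k+2} + c_k, returning 0.5*(b0 - b2);
// it is used by digamma() below.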
float digamma(const float x)
{
int ntapsi(16);
int ntpsi(23);
ColumnVector psics(ntpsi);
ColumnVector apsics(ntapsi);
psics << -.038057080835217922E0<<
.49141539302938713E0<<
-.056815747821244730E0<<
.008357821225914313E0<<
-.001333232857994342E0<<
.000220313287069308E0<<
-.000037040238178456E0<<
.000006283793654854E0<<
-.000001071263908506E0<<
.000000183128394654E0<<
-.000000031353509361E0<<
.000000005372808776E0<<
-.000000000921168141E0<<
.000000000157981265E0<<
-.000000000027098646E0<<
.000000000004648722E0<<
-.000000000000797527E0<<
.000000000000136827E0<<
-.000000000000023475E0<<
.000000000000004027E0<<
-.000000000000000691E0<<
.000000000000000118E0<<
-.000000000000000020E0;
apsics <<-.0204749044678185E0<<
-.0101801271534859E0<<
.0000559718725387E0<<
-.0000012917176570E0<<
.0000000572858606E0<<
-.0000000038213539E0<<
.0000000003397434E0<<
-.0000000000374838E0<<
.0000000000048990E0<<
-.0000000000007344E0<<
.0000000000001233E0<<
-.0000000000000228E0<<
.0000000000000045E0<<
-.0000000000000009E0<<
.0000000000000002E0<<
-.0000000000000000E0;
float y = fabs(x);
float psi;
if(y<2.0)
{
// do we need to deal with the following case?
// c psi(x) for -2. .lt. x .lt. 2.
int n = int(floor(x));
y = x - n;
n = n - 1;
psi = csevl(2*y-1, psics, ntpsi);
if(n==-1)
{
psi = psi - 1.0/x;
}
}
else
{
const float aux = csevl(8/(Sqr(y))-1, apsics, ntapsi);
psi = log(x) - 0.5/x + aux;
}
return psi;
}
void glm_vb(const Matrix& X, const ColumnVector& Y, ColumnVector& B, SymmetricMatrix& ilambda_B, int niters)
{
// Does Variational Bayes inference on GLM Y=XB+e with ARD priors on B
// design matrix X should be num_tpts*num_evs
/////////////////////
// setup
OUT("Setup");
int ntpts=Y.Nrows();
int nevs=X.Ncols();
if(ntpts != X.Nrows())
throw Exception("glm_vb: X and Y have different numbers of time points");
OUT(nevs);
OUT(ntpts);
ColumnVector gam_m(nevs);
gam_m=1e10;
float gam_y;
ColumnVector lambdaB(nevs);
if(nevs<ntpts-10)
{
// initialise with OLS
B=pinv(X)*Y;
ColumnVector res=Y-X*B;
gam_y=(ntpts-nevs)/(res.t()*res).AsScalar();
ilambda_B << (X.t()*X*gam_y).i();
lambdaB=0;
for(int l=1; l <= nevs; l++)
{
lambdaB(l)=ilambda_B(l,l);
}
}
else
{
OUT("no ols");
B.ReSize(nevs);
B=0;
lambdaB=1;
// ColumnVector res=Y-X*B;
// gam_y=ntpts/(res.t()*res).AsScalar();
gam_y=10;
}
// OUT(B(1));
// OUT(lambdaB(1));
float trace_ilambdaZZ=1;
SymmetricMatrix ZZ;
ZZ << X.t()*X;
ColumnVector ZY = X.t()*Y;
float YY=0;
for(int t=1; t <= ntpts; t++)
YY += Sqr(Y(t));
/////////////////////
// iterate
OUT("Iterate");
int i = 1;
for(; i<=niters; i++)
{
////////////////////
// update gam_m (ARD precisions on the regression coefficients)
for(int l=1; l <= nevs; l++)
{
// NOTE: b_m0 and c_m are Gamma prior/posterior parameters; the values below are assumed defaults
float b_m0 = 1e10;
float c_m0 = 1;
float c_m = c_m0 + 0.5;
float b_m = 1.0/(0.5*(Sqr(B(l))+lambdaB(l))+1.0/b_m0);
gam_m(l) = b_m*c_m;
}
////////////////////
// update B
ColumnVector beta(nevs);
beta = 0;
SymmetricMatrix lambda_B(nevs);
lambda_B = 0;
for(int l=1; l <= nevs; l++)
lambda_B(l,l)=gam_m(l);
SymmetricMatrix tmp = lambda_B + gam_y*ZZ;
lambda_B << tmp;
beta += gam_y*ZY;
ilambda_B << lambda_B.i();
B = ilambda_B*beta;
lambdaB.ReSize(nevs);
lambdaB=0;
for(int l=1; l <= nevs; l++)
{
lambdaB(l)=ilambda_B(l,l);
}
////////////////////
// compute trace for noise precision phiy update
SymmetricMatrix tmp3;
tmp3 << ilambda_B;
SymmetricMatrix tmp2;
tmp2 << tmp3*ZZ;
trace_ilambdaZZ=tmp2.Trace();
/////////////////////
// update phiy
float b_y0 = 1e10;
float c_y0 = 1;
// NOTE: c_y is the Gamma posterior shape for the noise precision; the standard
// VB update c_y = c_y0 + ntpts/2 is assumed here
float c_y = c_y0 + ntpts/2.0;
float sum = YY + (B.t()*ZZ*B).AsScalar() - 2*(B.t()*ZY).AsScalar();
float b_y = 1.0/(0.5*(sum + trace_ilambdaZZ)+1/b_y0);
gam_y = b_y*c_y;
// OUT(gam_y);
}
}
vector<float> ColumnVector2vector(const ColumnVector& col)
{
vector<float> vec(col.Nrows());
for(int c = 0; c < col.Nrows(); c++)
vec[c] = col(c+1);
return vec;
}
/////////////////////////////////////////////////////////////////////////////////////////////////////
// Uninteresting byte swapping functions
typedef struct { unsigned char a,b ; } TWObytes ;
Mark Jenkinson
committed
void Swap_2bytes( int n , void *ar ) /* 2 bytes at a time */
{
register TWObytes *tb = (TWObytes *)ar ;
register int ii ;
register unsigned char tt ;
for( ii=0 ; ii < n ; ii++ ){
tt = tb[ii].a ; tb[ii].a = tb[ii].b ; tb[ii].b = tt ;
}
return ;
}
/*---------------------------------------------------------------------------*/
typedef struct { unsigned char a,b,c,d ; } FOURbytes ;
void Swap_4bytes( int n , void *ar ) /* 4 bytes at a time */
{
register int ii ;
register FOURbytes *tb = (FOURbytes *)ar ;
register unsigned char tt ;
for( ii=0 ; ii < n ; ii++ ){
tt = tb[ii].a ; tb[ii].a = tb[ii].d ; tb[ii].d = tt ;
tt = tb[ii].b ; tb[ii].b = tb[ii].c ; tb[ii].c = tt ;
}
return ;
}
/*---------------------------------------------------------------------------*/
typedef struct { unsigned char a,b,c,d , D,C,B,A ; } EIGHTbytes ;
void Swap_8bytes( int n , void *ar ) /* 8 bytes at a time */
{
register int ii ;
register EIGHTbytes *tb = (EIGHTbytes *)ar ;
register unsigned char tt ;
for( ii=0 ; ii < n ; ii++ ){
tt = tb[ii].a ; tb[ii].a = tb[ii].A ; tb[ii].A = tt ;
tt = tb[ii].b ; tb[ii].b = tb[ii].B ; tb[ii].B = tt ;
tt = tb[ii].c ; tb[ii].c = tb[ii].C ; tb[ii].C = tt ;
tt = tb[ii].d ; tb[ii].d = tb[ii].D ; tb[ii].D = tt ;
}
return ;
}
/*---------------------------------------------------------------------------*/
typedef struct { unsigned char a,b,c,d,e,f,g,h ,
H,G,F,E,D,C,B,A ; } SIXTEENbytes ;
void Swap_16bytes( int n , void *ar ) /* 16 bytes at a time */
{
register int ii ;
register SIXTEENbytes *tb = (SIXTEENbytes *)ar ;
register unsigned char tt ;
for( ii=0 ; ii < n ; ii++ ){
tt = tb[ii].a ; tb[ii].a = tb[ii].A ; tb[ii].A = tt ;
tt = tb[ii].b ; tb[ii].b = tb[ii].B ; tb[ii].B = tt ;
tt = tb[ii].c ; tb[ii].c = tb[ii].C ; tb[ii].C = tt ;
tt = tb[ii].d ; tb[ii].d = tb[ii].D ; tb[ii].D = tt ;
tt = tb[ii].e ; tb[ii].e = tb[ii].E ; tb[ii].E = tt ;
tt = tb[ii].f ; tb[ii].f = tb[ii].F ; tb[ii].F = tt ;
tt = tb[ii].g ; tb[ii].g = tb[ii].G ; tb[ii].G = tt ;
tt = tb[ii].h ; tb[ii].h = tb[ii].H ; tb[ii].H = tt ;
}
return ;
}
/*---------------------------------------------------------------------------*/
void Swap_Nbytes( int n , int siz , void *ar ) /* subsuming case */
{
switch( siz ){
case 2: Swap_2bytes ( n , ar ) ; break ;
case 4: Swap_4bytes ( n , ar ) ; break ;
case 8: Swap_8bytes ( n , ar ) ; break ;
case 16: Swap_16bytes( n , ar ) ; break ;
}
return ;
}
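// Usage sketch (editor's illustration): flip the endianness of a buffer of 100 floats in place:
//   float buf[100];
//   Swap_Nbytes(100, sizeof(float), buf);   // dispatches to Swap_4bytes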
// end namespace MISCMATHS
}