Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
919 changes: 609 additions & 310 deletions documentation/mathematica/NewtonianBC.nb

Large diffs are not rendered by default.

5,438 changes: 3,549 additions & 1,889 deletions documentation/mathematica/PostNewtonian.nb

Large diffs are not rendered by default.

2 changes: 2 additions & 0 deletions lib/Splinor.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -64,6 +64,7 @@ double Splinor::interp(double xpos){
if(!wrap) xind = (xind >=0 ? 0 : len-2);
//if we've already wrapped, quit with error
else if (wrap){
//printf("ERROR: could not find that value, you cad!\n");
return nan("array wrap");
}
//set flag to note we've wrapped through array once
Expand Down Expand Up @@ -102,6 +103,7 @@ double Splinor::deriv(double xpos){
if(!wrap) xind = 0;
//if we've already wrapped, quit with error
else if (wrap){
//printf("ERROR: could not find that value, you cad!\n");
return nan("");
}
//set flag to note we've wrapped through array once
Expand Down
46 changes: 29 additions & 17 deletions lib/chandra.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -10,9 +10,14 @@ double Chandrasekhar::factor_f(double x){
}

//g(x): Chandrasekhar EOS structure function, defined in terms of f(x)
// g(x) = 8 x^3 (sqrt(1+x^2) - 1) - f(x), with x the dimensionless Fermi momentum
// (the duplicated return line left over from the diff is removed)
double Chandrasekhar::factor_g(double x){
	return 8.*x*x*x*(sqrt(1.+x*x) - 1.0) - factor_f(x);
}

//h(x, sigma, mue): combines the rest-mass term mue*x^3 with sigma times the
// internal-energy function g(x)
// NOTE(review): the default for mue is supplied here in the definition, not in the
// header declaration -- it is only visible in this translation unit after this
// point; consider moving the default into chandra.h
double Chandrasekhar::factor_h(double x, double sigma, double mue=1.0){
	const double rest_mass = mue*x*x*x;
	const double internal  = sigma*factor_g(x);
	return rest_mass + internal;
}


//methods using the weights and abscissae of Sagar 1991
// these methods have tested accuracy for -1 < eta < 10
// for regions eta > 10, Sagar suggests using asymptotic approximation
Expand Down Expand Up @@ -63,26 +68,33 @@ double FermiDirac::FermiDirac(double k, double eta){
// here beta = k*T/(m c^2)
// see Tooper 1969, ApJ 156
//generalized Fermi-Dirac integral with relativistic correction factor
// integrand: x^k sqrt(1 + beta*x/2) / (1 + exp(x-eta))
// here beta = k*T/(m c^2)
// see Tooper 1969, ApJ 156
//uses trapezoidal integration: a fine grid up to x = 2*eta (where the integrand
// varies fastest), then geometrically growing steps until the exponential tail
// is negligible
double FermiDirac::FermiDirac(double k, double eta, double beta) {
	double integral=0.0, int1=0.0, int2=0.0;
	//BUGFIX: for eta <= 0 the original step eta/500 was non-positive, both loops
	// were skipped, and the (nonzero) integral was incorrectly returned as 0;
	// fall back to a fixed small step in that case
	double x = 0.0, dx = (eta > 0.0 ? eta/500.0 : 1.e-2);
	//fine trapezoidal integration over 0 < x < 2*eta
	while(x < 2.*eta){
		x += dx;
		int1 = int2;
		int2 = pow(x,k)*sqrt(1.+0.5*beta*x)/(1. + exp(x-eta));
		integral += 0.5*(int1+int2)*dx;
	}
	//integrate the tail with steps growing as x/100 until the integrand is tiny
	// do-while so the tail is also covered when eta <= 0 (int2 starts at 0 there);
	// the dx guard keeps the step positive while x is still 0
	do {
		if(x > 0.0) dx = (x/100.0);
		x += dx;
		int1 = int2;
		int2 = pow(x,k)*sqrt(1.+0.5*beta*x)/(1. + exp(x-eta));
		integral += 0.5*(int1+int2)*dx;
	} while(int2 > 1e-14);
	return integral;
}
//partial derivatives
double FermiDirac::FermiDiracdelEta(double k, double eta, double beta) {
Expand Down
3 changes: 3 additions & 0 deletions lib/chandra.h
Original file line number Diff line number Diff line change
Expand Up @@ -5,11 +5,14 @@
#include "../src/constants.h"

//structure functions for the Chandrasekhar white-dwarf equation of state
namespace Chandrasekhar {
//previous values of the scale constants, kept commented for reference
// const double A0 = 6.002332e22;
// const double B0 = 9.81019e5;
//EOS scale constants -- presumably the pressure and density scales in cgs
// units; TODO confirm against the source these values were taken from
const double A0 = 6.0406e22;
const double B0 = 9.8848e5;

//f(x): EOS structure function of the dimensionless Fermi momentum x
double factor_f(double x);
//g(x) = 8x^3(sqrt(1+x^2)-1) - f(x), see chandra.cpp
double factor_g(double x);
//h(x, sigma, mue) = mue*x^3 + sigma*g(x); third argument defaults to 1.0
// in the definition (chandra.cpp), so other translation units must pass it
double factor_h(double x, double, double);
};

namespace FermiDirac {
Expand Down
38 changes: 26 additions & 12 deletions lib/matrix.h
Original file line number Diff line number Diff line change
Expand Up @@ -12,9 +12,9 @@
//calculate the determinant of an NxN matrix
//uses LU decomposition, based on GSL function
//will destroy the matrix m
template <size_t N>
double determinant(double (&m)[N][N]){
double det=1.0, temp = 0.0, big = 0.0, ajj=0.0, coeff=0.0;
template <size_t N, class T>
T determinant(T (&m)[N][N]){
T det=1.0, temp = 0.0, big = 0.0, ajj=0.0, coeff=0.0;
int i=0,j=0,k=0,ipiv=0;

for(j=0; j<N-1;j++){
Expand All @@ -26,7 +26,10 @@ double determinant(double (&m)[N][N]){
big=temp;
ipiv=i;
}
}
}
//if max element is 0, then singular matrix
//if(big==0.0) return 0.0;

//if max is not on diagonal, swap rows
if(ipiv != j){
for(k=0;k<N;k++){
Expand All @@ -46,7 +49,12 @@ double determinant(double (&m)[N][N]){
}
}
}
det *= m[j][j];
//if the diagonal element is zero, this matrix is singular
else{
// return 0.0;
}
det *= m[j][j];

}
det *= m[N-1][N-1];
return det;
Expand All @@ -55,12 +63,12 @@ double determinant(double (&m)[N][N]){
//in equation Ax=b, invert NxN matrix A to solve for x
//will destroy the matrix A
//can handle equations Ax=0
template <size_t N>
int invertMatrix(double (&A)[N][N], double(&b)[N], double(&x)[N]){
template <size_t N, class vartype>
int invertMatrix(vartype (&A)[N][N], vartype(&b)[N], vartype(&x)[N]){
//void invertMatrix(int N, double** A, double *b, double *x){
//a flag, in case we are solving the homogeneous problem
bool HOMOGENEOUS = true;
double dummy = 0.0;
vartype dummy = 0.0;
for(int j=0; j<N; j++) HOMOGENEOUS &= (b[j]==0.0);
//reduce to row-echelon form
int L=0, indxBottom = N-1;
Expand All @@ -83,7 +91,8 @@ int invertMatrix(double (&A)[N][N], double(&b)[N], double(&x)[N]){
}
//switch more rows to bottom as time goes on
indxBottom--;
if(indxBottom <= 0) {printf("I think your whole matrix is zero...\n"); return 1;}
if(indxBottom <= 0) {
printf("ERROR in invertMatrix: I think your whole matrix is zero...\n"); return 1;}
break;
}
}
Expand All @@ -108,14 +117,14 @@ int invertMatrix(double (&A)[N][N], double(&b)[N], double(&x)[N]){
}
}
//now that we have nonzero row, divide everything by front element
double a = 1.0/A[i][L];
vartype a = 1.0/A[i][L];
b[i] *= a;
for(int j=0; j<N; j++){
A[i][j] *= a;
}
A[i][L] = 1.0;//set to one, for good measure
//now that we have a leading one, remove column elements below
double lead = 0.0;
vartype lead = 0.0;
for(int j=i+1; j<N; j++){
lead = A[j][L];
for(int k=0; k<N;k++){
Expand Down Expand Up @@ -161,7 +170,12 @@ int invertMatrix(double (&A)[N][N], double(&b)[N], double(&x)[N]){
for(int j=L+1; j<N; j++){
x[i] -= A[i][j]*x[j];
}
}
}
bool produced_nan = false;
for(int i=0; i<N; i++){
if(isnan(x[i])) produced_nan = true;
}
if(produced_nan){printf("ERROR in invertMatrix: NaNs produced\n"); return 1;}
return 0;
}

Expand Down
198 changes: 198 additions & 0 deletions lib/rootfind.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,198 @@
#ifndef ROOTFINDINGMETHODS_C
#define ROOTFINDINGMETHODS_C

#include "rootfind.h"
#include <complex>

//return a pseudo-random double in (0,1) from a simple linear congruential
// generator; the state is static, so successive calls walk the sequence
double pseudo_unif(){
	//LCG parameters and persistent state (state advances on every call)
	static int mult=53, inc=122, mod=17737, state = 39;
	//advance: state becomes a pseudo-random integer in (0,mod)
	state = (mult*state + inc) % mod;
	return (double(state)/double(mod));
}

// *************************************************************************************
// BISECTION METHODS
// These methods are for implementing bisection searches in one parameter
// *************************************************************************************

// this will find brackets using a simple expansion/movement search
// one bracket is moved to one side until the function changes sign, to find brackets
// this will find brackets using a simple expansion/movement search
// one bracket is moved to one side until the function changes sign, to find brackets
// NOTE: the y>0 branch marches x upward in steps of x0, the y<0 branch halves x --
//       this assumes the sign change lies in that direction; otherwise the loops
//       will not terminate
void bisection_find_brackets_move(
	std::function<double(double)> func, //the function to find zero of
	double x0,     //an initial guess for the zero
	double& xmin,  //the lower bracket -- will be returned
	double& xmax   //the upper bracket -- will be returned
){
	double x=x0, y = func(x), ymin, ymax;
	double dx = x0;

	//BUGFIX: if the guess is already an exact zero, neither branch below runs
	// and xmin, xmax were left unassigned (caller then read garbage);
	// collapse both brackets onto the zero instead
	if(y == 0.0){
		xmin = x0;
		xmax = x0;
		return;
	}

	//find brackets on x that bound a zero in y
	if(y > 0){
		xmin = x; ymin = y;
		//march upward until y changes sign (or goes NaN)
		while(y > 0 && !isnan(y)){
			x += dx;
			y = func(x);
			//stepped into an invalid region: restart with a 10x smaller step
			if(isnan(y)){
				x = x0; dx *= 0.1; y = 1.0;
			}
		}
		xmax = x; ymax = y;
	}
	else if (y < 0){
		xmax = x; ymax = y;
		//halve x until y changes sign
		while(y < 0){
			x *= 0.5;
			y = func(x);
		}
		xmin = x; ymin = y;
	}

	//swap if they are backwards
	if(xmin > xmax){
		double temp = xmax;
		xmax = xmin; xmin = temp;
		temp = ymax;
		ymax = ymin; ymin = temp;
	}
}

// this will find brackets using newton's method to look for the next zero, then a bit beyond it
// why use one zero-finding method to prepare another zero-finding method?
// because Newton's method can fail, but bisection searches can go to nearly arbitrary accuracy
// this will find brackets using newton's method to look for the next zero, then a bit beyond it
// why use one zero-finding method to prepare another zero-finding method?
// because Newton's method can fail, but bisection searches can go to nearly arbitrary accuracy
void bisection_find_brackets_newton(
	std::function<double(double)> func, //the function to find zero of
	double x,      //an initial guess for the zero
	double& xmin,  //the lower bracket -- will be returned
	double& xmax   //the upper bracket -- will be returned
){
	double y1=func(x), y2=y1;
	double x1=x, x2=x;
	double xdx, ydx;
	double ymax, ymin; //function values at the brackets (recorded but not returned)
	//while the two ys are on same side of axis, keep repositioning until zero is bound
	//we use Newton's method (secant form, 1% finite-difference step) to the nearest zero
	while(y1*y2 >= 0.0){
		//compute numerical derivative
		xdx = 1.01*x2;
		ydx = func(xdx);
		xdx = x2 - y2*(xdx-x2)/(ydx-y2); //secant step toward the zero
		//Newton's method can fail at this if it only approaches the zero from one direction
		//In such a case, slightly broaden the bracket to get to other side of zero
		if( xdx == x2 ) {
			if(xdx>x1) xdx *= 1.01;
			if(xdx<x1) xdx *= 0.99;
		}
		//limit amount of change allowed in single step
		if(xdx > 1.1*x2) xdx = 1.1*x2; //if we increased, don't increase too much
		if(xdx < 0.9*x2) xdx = 0.9*x2; //if we decreased, don't decrease too much
		ydx = y2;
		y2 = func(xdx);
		x2 = xdx;
	}
	//sometimes the brackets will be on either end of hyperbolic divergence
	//check that solution changes continuously between the two brackets
	//FIX: use logical &&/|| here -- the original bitwise &/| gives the same truth
	// table on bools but does not short-circuit and trips compiler warnings
	double x3 = 0.5*(x1+x2), y3 = func(x3);
	double scale = 1.01;
	while( (y3*y1>=0.0 && fabs(y3)>fabs(y1)) || (y3*y2>=0.0 && fabs(y3)>fabs(y2))){
		//this can sometimes be solved by taking broader steps in secant method
		scale *= 1.1;
		x2=x1;
		y2=y1;
		while(y1*y2 >= 0.0){
			//compute numerical derivative with the broadened step
			xdx = scale*x2;
			ydx = func(xdx);
			xdx = x2 - y2*(xdx-x2)/(ydx-y2);
			//Newton's method can fail at this if it only approaches the zero from one direction
			//In such a case, slightly broaden the bracket to get to other side of zero
			if( xdx == x2 ) {
				if(xdx>x1) xdx *= 1.01;
				if(xdx<x1) xdx *= 0.99;
			}
			ydx = y2;
			y2 = func(xdx);
			x2 = xdx;
		}
		x3 = 0.5*(x1+x2);
		y3 = func(x3);
	}

	//order the endpoints into the bracket outputs
	if(x2 > x1){
		xmax = x2; ymax = y2;
		xmin = x1; ymin = y1;
	}
	else {
		xmax = x1; ymax = y1;
		xmin = x2; ymin = y2;
	}
	//swap if they are backwards
	if(xmin > xmax){
		double temp = xmax;
		xmax = xmin; xmin = temp;
		temp = ymax;
		ymax = ymin; ymin = temp;
	}
}

//given brackets bounding a single zero, find the zero
// the brackets xmin, xmax MUST bound a single zero
//given brackets bounding a single zero, find the zero
// the brackets xmin, xmax MUST bound a single zero
// returns the function value at the final midpoint (0.0 when an exact zero is hit)
double bisection_search(
	std::function<double(double)> func, //the function to find zero of
	double &x,   //the location of zero -- will be returned
	double xmin, //the lower bracket
	double xmax  //the upper bracket
){
	//now use bisection to find dx so that y=0.0
	x = 0.5*(xmin+xmax);
	double y = func(x), y2 = y;
	double ymin = func(xmin), ymax=func(xmax);
	//BUGFIX: the stall detector below compares against the bracket WIDTH, but
	// xold was initialized to the position x; initialize to the width instead
	double xold = fabs(xmax-xmin);
	//iterate until the midpoint value is exactly zero, or the brackets stall
	while( fabs(y)>0.0 || isnan(y) ){
		//replace whichever bracket shares the sign of the midpoint value
		if( (y*ymax>0.0) ){
			xmax = x;
			ymax = y;
		}
		else if( (y*ymin>0.0) ){
			xmin = x;
			ymin = y;
		}
		x = 0.5*(xmin+xmax);
		y = func(x);

		//it can happen that the search becomes stuck
		// in this case, picking pseudorandom location within brackets can sometimes help
		if(y2==y){//if the problem becomes stuck in a loop
			x = xmin + pseudo_unif()*fabs(xmax-xmin);
			y = func(x);
		}
		y2 = y;
		//if the brackets are not moving, stop the search
		if(xold == fabs(xmax-xmin)) break;
		xold = fabs(xmax-xmin);
	}
	x = 0.5*(xmin+xmax);
	return func(x);
}



// *************************************************************************************
// NEWTON METHODS
// These methods are for gradient-descent methods
// *************************************************************************************

//explicit specializations of the gen_abs template (primary template presumably
// declared in rootfind.h -- TODO confirm) so the Newton-method code can take
// magnitudes of either real or complex values
template<>
double gen_abs<double> (double x){
//real case: ordinary absolute value
return fabs(x);
}

template<>
std::complex<double> gen_abs(std::complex<double> x){
//complex case: the modulus |x|, returned as the template's value type
return std::abs(x);
}

#endif
Loading