Detach Tensor3D from MATLAB #290

Merged (21 commits, Jun 5, 2023)
Changes from all commits (21 commits)
8415c6b
Add path_exists functions
willGraham01 Jun 5, 2023
39e298d
Write tests for new function, path_exists
willGraham01 Jun 5, 2023
1a89b54
Add option to force path_exists to error
willGraham01 Jun 5, 2023
b074002
Update HDF5Reader methods now that read_dataset_in_group() is redundant
willGraham01 Jun 5, 2023
14cd103
Remove unused variables that rebase barfed
willGraham01 Jun 5, 2023
21aa7e8
Rework Tensor3D class
willGraham01 May 19, 2023
553f212
Fix Tensor3D immediate tests
willGraham01 May 10, 2023
9465bc4
Fix derived classes part 1
willGraham01 May 10, 2023
a2da09e
Pass by const reference when passing indices
willGraham01 May 10, 2023
d80b9d3
Fix dependent classes part 2
willGraham01 Jun 1, 2023
142bc8c
Fix dependent classes part 3 [TDMS builds]
willGraham01 May 10, 2023
42e5b94
Fix bug in assignment for Tensor3D
willGraham01 May 19, 2023
7def0ee
Breakout array/ subfolder for future MATLAB removal ease. Move DTilde…
willGraham01 Jun 1, 2023
63f7275
Add docstrings to broken-out files
willGraham01 May 11, 2023
1d206f4
Rely on call() operator only
willGraham01 May 11, 2023
cd15360
Remove now-unnecessary commented-out code
willGraham01 May 11, 2023
5776ecd
This should work, but it causes a seg-fault in the BLI tests.
willGraham01 May 11, 2023
648723a
use std::abs in templates to avoid 0-casts
willGraham01 May 12, 2023
6b31724
Tensor3D should "have-a" vector, but not "be-a" vector.
willGraham01 May 12, 2023
d90f71c
Apply suggestions from code review
willGraham01 Jun 5, 2023
c849ebe
Fix linting
willGraham01 Jun 5, 2023
122 changes: 1 addition & 121 deletions tdms/include/arrays.h
@@ -11,6 +11,7 @@

#include <fftw3.h>

#include "arrays/tensor3d.h"
#include "globals.h"
#include "matlabio.h"
#include "utils.h"
@@ -354,127 +355,6 @@ class Pupil : public Matrix<double> {
~Pupil();
};

template<typename T>
class Tensor3D {
protected:
int n_layers = 0;
int n_cols = 0;
int n_rows = 0;
T ***tensor = nullptr;

public:
bool is_matlab_initialised = false;

Tensor3D() = default;

Tensor3D(T ***tensor, int n_layers, int n_cols, int n_rows) {
initialise(tensor, n_layers, n_cols, n_rows);
}

void initialise(T ***_tensor, int _n_layers, int _n_cols, int _n_rows) {
tensor = _tensor;
n_layers = _n_layers;
n_cols = _n_cols;
n_rows = _n_rows;
}

inline T **operator[](int value) const { return tensor[value]; };

bool has_elements() { return tensor != nullptr; };

void zero() {
for (int k = 0; k < n_layers; k++)
for (int j = 0; j < n_cols; j++)
for (int i = 0; i < n_rows; i++) { tensor[k][j][i] = 0; }
}

void allocate(int nK, int nJ, int nI) {
n_layers = nK, n_cols = nJ, n_rows = nI;
tensor = (T ***) malloc(n_layers * sizeof(T **));

for (int k = 0; k < n_layers; k++) {
tensor[k] = (T **) malloc(n_cols * sizeof(T *));
}

for (int k = 0; k < n_layers; k++) {
for (int j = 0; j < n_cols; j++) {
tensor[k][j] = (T *) malloc(n_rows * sizeof(T));
}
}
};

/**
* @brief Computes the Frobenius norm of the tensor
*
* fro_norm = \f$\sqrt{ \sum_{i=0}^{I_tot}\sum_{j=0}^{J_tot}\sum_{k=0}^{K_tot}
* |t[k][j][i]|^2 }\f$
*/
double frobenius() {
T norm_val = 0;
for (int i1 = 0; i1 < n_layers; i1++) {
for (int i2 = 0; i2 < n_cols; i2++) {
for (int i3 = 0; i3 < n_rows; i3++) {
norm_val += abs(tensor[i1][i2][i3]) * abs(tensor[i1][i2][i3]);
}
}
}
return sqrt(norm_val);
}

~Tensor3D() {
if (tensor == nullptr) return;
if (is_matlab_initialised) {
free_cast_matlab_3D_array(tensor, n_layers);
} else {
for (int k = 0; k < n_layers; k++) {
for (int j = 0; j < n_cols; j++) { free(tensor[k][j]); }
free(tensor[k]);
}
free(tensor);
}
}
};

/**
* @brief Stores the fibre modes in the Fourier plane of the objective lens.
*
* The "Tilde" indicates that these quantities are in a Fourier plane relative
* to where the optical fibre is actually located, meaning that it has a Fourier
* relationship relative to the physical fibre mode(s).
*/
class DTilde {
protected:
int n_det_modes = 0;//< Number of modes specified
static void set_component(Tensor3D<std::complex<double>> &tensor,
const mxArray *ptr, const std::string &name,
int n_rows, int n_cols);

public:
/** @brief Fetch the number of modes */
inline int num_det_modes() const { return n_det_modes; };

/*! 3-dimensional vector of fibre modes indexed by (j, i, i_m).
* i and j index over the x and y plane respectively.
* i_m indexes the different modes specified in the input file.*/
Tensor3D<std::complex<double>> x;
/*! @copydoc x */
Tensor3D<std::complex<double>> y;

void initialise(const mxArray *ptr, int n_rows, int n_cols);
};

class IncidentField {
protected:
void set_component(Tensor3D<double> &component, const mxArray *ptr,
const std::string &name);

public:
Tensor3D<double> x;
Tensor3D<double> y;

explicit IncidentField(const mxArray *ptr);
};

/**
* List of field components as integers
*/
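To make the migration concrete, the following is a minimal sketch of how a call site might move from the removed raw-pointer Tensor3D above to the vector-backed replacement in arrays/tensor3d.h. The function name and the dimension values are illustrative assumptions, not code taken from TDMS.

#include "arrays/tensor3d.h"

// Sketch only: previously a Tensor3D<double> wrapped a malloc'd (or
// MATLAB-owned) T*** buffer and was indexed as tensor[k][j][i]; the reworked
// class owns a contiguous std::vector and is indexed with the call operator.
void migration_sketch() {
  int n_layers = 4, n_cols = 8, n_rows = 16;// illustrative sizes only

  Tensor3D<double> t(n_layers, n_cols, n_rows);// replaces t.allocate(nK, nJ, nI)
  t.zero();

  // Was: t[k][j][i] = 1.0;  (layer, column, row)
  t(0, 0, 0) = 1.0;// now: (row, column, layer) via operator()

  double norm = t.frobenius();// same interface, now computed over flat storage
  (void) norm;
}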
55 changes: 55 additions & 0 deletions tdms/include/arrays/dtilde.h
@@ -0,0 +1,55 @@
/**
* @file dtilde.h
* @brief Container class for the DTilde object, storing the fibre modes in the
* Fourier plane of the objective lens.
*/
#pragma once

#include <complex>
#include <string>
#include <vector>

#include "arrays/tensor3d.h"
#include "matrix.h"

/**
* @brief Stores the fibre modes in the Fourier plane of the objective lens.
* @details The "Tilde" indicates that these quantities are in a Fourier plane
* relative to where the optical fibre is actually located, meaning
* that it has a Fourier relationship relative to the physical fibre
* mode(s).
*/
class DTilde {
protected:
int n_det_modes = 0;//!< Number of modes specified

/**
* @brief Set one of the two components by reading from an existing MATLAB
* buffer.
*
* @param tensor Component to read into
* @param ptr Pointer to the [struct] memory buffer to read from
* @param name [Debug] Name of the field to read from
* @param n_rows,n_cols Dimensions to assign to the component
*/
static void set_component(Tensor3D<std::complex<double>> &tensor,
const mxArray *ptr, const std::string &name,
int n_rows, int n_cols);

public:
DTilde() = default;

/** @brief (Fetch the) number of fibre modes specified */
int num_det_modes() const { return n_det_modes; };

Tensor3D<std::complex<double>> x;
Tensor3D<std::complex<double>> y;

/**
* @brief Initialise the fibre modes by reading from a MATLAB buffer.
*
* @param ptr Pointer to a MATLAB struct containing the data to read
* @param n_rows,n_cols Dimensions to assign to the component
*/
void initialise(const mxArray *ptr, int n_rows, int n_cols);
};
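Since DTilde is only ever filled from a MATLAB workspace buffer, a usage example has to assume such a buffer exists. The sketch below takes that pointer as a parameter; the function name and the early-exit behaviour are illustrative assumptions, not part of this PR.

#include <complex>

#include "arrays/dtilde.h"

// Sketch only: `ptr` is assumed to point at a MATLAB struct containing the
// fibre-mode fields that DTilde::initialise reads.
std::complex<double> first_x_mode(const mxArray *ptr, int n_rows, int n_cols) {
  DTilde dtilde;
  dtilde.initialise(ptr, n_rows, n_cols);

  if (dtilde.num_det_modes() == 0 || !dtilde.x.has_elements()) {
    return {0.0, 0.0};
  }
  // x and y are Tensor3D<std::complex<double>>; elements are fetched with the
  // call operator rather than chained [] subscripts.
  return dtilde.x(0, 0, 0);
}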
22 changes: 22 additions & 0 deletions tdms/include/arrays/incident_field.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
/**
* @file incident_field.h
* @brief Declaration for the IncidentField object.
*/
#pragma once

#include <string>

#include "arrays/tensor3d.h"
#include "matrix.h"

class IncidentField {
protected:
void set_component(Tensor3D<double> &component, const mxArray *ptr,
const std::string &name);

public:
Tensor3D<double> x;
Tensor3D<double> y;

explicit IncidentField(const mxArray *ptr);
};
153 changes: 153 additions & 0 deletions tdms/include/arrays/tensor3d.h
@@ -0,0 +1,153 @@
/**
* @file tensor3d.h
* @author William Graham
* @brief template class for storing three-dimensional data arrays.
*/
#pragma once

#include <algorithm>
#include <cmath>
#include <vector>

#include "cell_coordinate.h"

/**
* @brief Template class for storing three-dimensional data as a strided vector.
* @details Three dimensional data is stored as a strided vector, in row-major
* (C) format. The last listed dimension has the fastest-varying index, and the
* first listed dimension the slowest-varying. Explicitly,
*
* (*this)(i, j, k) == data_[i * n_cols * n_layers + j * n_layers + k].
*
* This is consistent with the way hdf5 files store array-like data, see
* https://support.hdfgroup.org/HDF5/doc1.6/UG/12_Dataspaces.html.
* @tparam T Numerical datatype
*/
template<typename T>
class Tensor3D {
private:
/** @brief Convert a 3D (i,j,k) index to the corresponding index in the
* strided storage. */
int to_global_index(int i, int j, int k) const {
return i * n_layers_ * n_cols_ + j * n_layers_ + k;
}
int to_global_index(const ijk &index_3d) const {
return to_global_index(index_3d.i, index_3d.j, index_3d.k);
}

protected:
int n_layers_ = 0;
int n_cols_ = 0;
int n_rows_ = 0;

/*! Strided vector that will store the array data */
std::vector<T> data_;

/*! The total number of elements, according to the dimension values currently
* set. */
int total_elements() const { return n_layers_ * n_cols_ * n_rows_; }

public:
Tensor3D() = default;
Tensor3D(int n_layers, int n_cols, int n_rows) {
allocate(n_layers, n_cols, n_rows);
}
Tensor3D(T ***buffer, int n_layers, int n_cols, int n_rows) {
initialise(buffer, n_layers, n_cols, n_rows);
}

/**
* @brief Subscript operator for the Tensor, retrieves the (i,j,k)-th element.
* @note Does not 'call' the tensor, but rather accesses the data.
* @example ...
* @see Tensor3D::operator[](const ijk&)
*/
T &operator()(int i, int j, int k) { return data_[to_global_index(i, j, k)]; }
T operator()(int i, int j, int k) const {
return data_[to_global_index(i, j, k)];
}

/** @brief Subscript operator for the Tensor, retrieving the (i,j,k)-th
* element. */
T &operator[](const ijk &index_3d) {
return data_[to_global_index(index_3d)];
}
T operator[](const ijk &index_3d) const {
return data_[to_global_index(index_3d)];
}

/**
* @brief Whether the tensor contains any elements.
* @details Strictly speaking, checks whether the total_elements() method
* returns 0, indicating that this tensor has no size and thus no elements.
*/
bool has_elements() const { return total_elements() != 0; }

/**
* @brief Allocate memory for this tensor given the dimensions passed.
*
* @param n_layers,n_cols,n_rows Number of layers, columns, and rows to
* assign.
*/
void allocate(int n_layers, int n_cols, int n_rows) {
n_layers_ = n_layers;
n_cols_ = n_cols;
n_rows_ = n_rows;
data_.resize(total_elements());
}

/**
* @brief Initialise this tensor from a 3D-buffer of matching size.
* @details Data values are copied, so this tensor owns an independent copy of
* the buffer contents.
* @param buffer 3D buffer to read from
* @param n_layers,n_cols,n_rows "Shape" of the read buffer to assign to this
* tensor.
* @param buffer_leads_n_layers If true, the buffer to read from is
* assumed to have dimensions [n_layers][n_cols][n_rows]. If false, it is
* assumed to have the dimensions in reverse order,
* [n_rows][n_cols][n_layers].
*/
void initialise(T ***buffer, int n_layers, int n_cols, int n_rows,
bool buffer_leads_n_layers = false) {
allocate(n_layers, n_cols, n_rows);
if (buffer_leads_n_layers) {
for (int k = 0; k < n_layers_; k++) {
for (int j = 0; j < n_cols_; j++) {
for (int i = 0; i < n_rows_; i++) {
operator()(i, j, k) = buffer[k][j][i];
}
}
}
} else {
for (int k = 0; k < n_layers_; k++) {
for (int j = 0; j < n_cols_; j++) {
for (int i = 0; i < n_rows_; i++) {
operator()(i, j, k) = buffer[i][j][k];
}
}
}
}
}

/** @brief Set all elements in the tensor to 0. */
void zero() { std::fill(data_.begin(), data_.end(), 0); }

/**
* @brief Computes the Frobenius norm of the tensor.
* @details The Frobenius norm is defined as:
*
* fro_norm = \f$\sqrt{
* \sum_{i=0}^{n_rows}\sum_{j=0}^{n_cols}\sum_{k=0}^{n_layers} |t[k][j][i]|^2
* }\f$.
*
* In practice, our data is flat and since addition is commutative, we can
* just iterate over the flattened data structure instead of using nested for
* loops.
*/
double frobenius() const {
T norm_val = 0;
for (const T &element_value : data_) {
norm_val += std::abs(element_value) * std::abs(element_value);
}
return std::sqrt(norm_val);
}
};
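To make the strided layout concrete, here is a small self-contained sketch (not part of the PR) exercising the documented index relationship and the Frobenius norm; the function name and sizes are illustrative.

#include <cassert>
#include <cmath>

#include "arrays/tensor3d.h"

// Sketch only: the (i, j, k) element of an n_rows x n_cols x n_layers tensor
// sits at flat index i*n_cols*n_layers + j*n_layers + k, as documented above.
void strided_layout_sketch() {
  int n_layers = 2, n_cols = 3, n_rows = 4;// illustrative sizes only
  Tensor3D<double> t(n_layers, n_cols, n_rows);
  t.zero();

  // Store each element's flat index in the element itself.
  for (int i = 0; i < n_rows; i++)
    for (int j = 0; j < n_cols; j++)
      for (int k = 0; k < n_layers; k++) {
        t(i, j, k) = i * n_cols * n_layers + j * n_layers + k;
      }

  // The 24 stored values are 0..23, so the Frobenius norm is sqrt(4324).
  assert(std::abs(t.frobenius() - std::sqrt(4324.0)) < 1e-9);
}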
3 changes: 0 additions & 3 deletions tdms/include/field.h
@@ -61,9 +61,6 @@ class SplitFieldComponent : public Tensor3D<double> {
fftw_plan *plan_f = nullptr;// Forward fftw plan
fftw_plan *plan_b = nullptr;// Backward fftw plan

double **operator[](int value) const { return tensor[value]; };
double &operator[](ijk cell) const { return tensor[cell.k][cell.j][cell.i]; }

void initialise_from_matlab(double ***tensor, Dimensions &dims);

/**