author    Thomas White <taw@bitwiz.org.uk>    2011-07-02 23:21:31 +0200
committer Thomas White <taw@physics.org>      2012-02-22 15:27:31 +0100
commit    47ba16fe47a0f4802d9744b3886e650fc3ae6fa7 (patch)
tree      f7b21355eb2109694eed0b527eecb50f4c6d31f7 /src
parent    88cd387d8e0e7e1e6271a3df2fe10e7722ee5976 (diff)
Work on documentation
Diffstat (limited to 'src')
-rw-r--r--  src/cell.c          17
-rw-r--r--  src/reflist.c      146
-rw-r--r--  src/reflist.h       20
-rw-r--r--  src/thread-pool.c   21
-rw-r--r--  src/thread-pool.h   43
-rw-r--r--  src/utils.c          2
6 files changed, 229 insertions, 20 deletions
diff --git a/src/cell.c b/src/cell.c
index 36eae721..caeab3c7 100644
--- a/src/cell.c
+++ b/src/cell.c
@@ -110,6 +110,13 @@ UnitCell *cell_new()
}
+/**
+ * cell_free:
+ * @cell: A %UnitCell to free.
+ *
+ * Frees a %UnitCell, and all internal resources concerning that cell.
+ *
+ */
void cell_free(UnitCell *cell)
{
if ( cell == NULL ) return;
@@ -553,6 +560,16 @@ static const char *cell_rep(UnitCell *cell)
}
+/**
+ * cell_rotate:
+ * @in: A %UnitCell to rotate
+ * @quat: A %quaternion
+ *
+ * Rotate a %UnitCell using a %quaternion.
+ *
+ * Returns: a newly allocated rotated copy of @in.
+ *
+ */
UnitCell *cell_rotate(UnitCell *in, struct quaternion quat)
{
struct rvec a, b, c;
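
A minimal usage sketch for cell_rotate() as documented above (not part of the
patch). The quaternion field names (w, x, y, z) are assumed from utils.h:

    #include "cell.h"
    #include "utils.h"

    /* Rotate a cell by the identity quaternion and return the new copy.
     * cell_rotate() allocates a new UnitCell; free it with cell_free(). */
    UnitCell *rotate_example(UnitCell *cell)
    {
        struct quaternion quat;

        quat.w = 1.0;   /* assumed field names: w, x, y, z */
        quat.x = 0.0;
        quat.y = 0.0;
        quat.z = 0.0;

        return cell_rotate(cell, quat);
    }
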
diff --git a/src/reflist.c b/src/reflist.c
index a88ce2c9..2db76914 100644
--- a/src/reflist.c
+++ b/src/reflist.c
@@ -299,9 +299,9 @@ void get_indices(const Reflection *refl,
/**
* get_symmetric_indices:
* @refl: A %Reflection
- * @h: Location at which to store the 'h' index of the reflection
- * @k: Location at which to store the 'k' index of the reflection
- * @l: Location at which to store the 'l' index of the reflection
+ * @hs: Location at which to store the 'h' index of the reflection
+ * @ks: Location at which to store the 'k' index of the reflection
+ * @ls: Location at which to store the 'l' index of the reflection
*
* This function gives the symmetric indices, that is, the "real" indices before
* squashing down to the asymmetric reciprocal unit. This may be useful if the
@@ -466,6 +466,14 @@ void copy_data(Reflection *to, const Reflection *from)
}
+/**
+ * set_detector_pos:
+ * @refl: A %Reflection
+ * @exerr: The excitation error for this reflection
+ * @fs: The fast scan offset of the reflection
+ * @ss: The slow scan offset of the reflection
+ *
+ **/
void set_detector_pos(Reflection *refl, double exerr, double fs, double ss)
{
refl->data.excitation_error = exerr;
@@ -474,6 +482,19 @@ void set_detector_pos(Reflection *refl, double exerr, double fs, double ss)
}
+/**
+ * set_partial:
+ * @refl: A %Reflection
+ * @r1: The first excitation error
+ * @r2: The second excitation error
+ * @p: The partiality
+ * @clamp_low: The first clamp status
+ * @clamp_high: The second clamp status
+ *
+ * This function is used during post refinement (in conjunction with
+ * get_partial()) to get access to the details of the partiality calculation.
+ *
+ **/
void set_partial(Reflection *refl, double r1, double r2, double p,
double clamp_low, double clamp_high)
{
@@ -485,42 +506,107 @@ void set_partial(Reflection *refl, double r1, double r2, double p,
}
+/**
+ * set_int:
+ * @refl: A %Reflection
+ * @intensity: The intensity for the reflection.
+ *
+ * Set the intensity for the reflection. Note that retrieval is done with
+ * get_intensity().
+ **/
void set_int(Reflection *refl, double intensity)
{
refl->data.intensity = intensity;
}
+/**
+ * set_scalable:
+ * @refl: A %Reflection
+ * @scalable: Non-zero if this reflection was marked as useful for scaling and
+ * post refinement.
+ *
+ **/
void set_scalable(Reflection *refl, int scalable)
{
refl->data.scalable = scalable;
}
+/**
+ * set_redundancy:
+ * @refl: A %Reflection
+ * @red: New redundancy for the reflection
+ *
+ * The redundancy of the reflection is the number of measurements that have been
+ * made of it. Note that a redundancy of zero may have a special meaning, such
+ * as that the reflection was impossible to integrate. Note further that each
+ * reflection in the list has its own redundancy, even if there are multiple
+ * copies of the reflection in the list. The total number of reflection
+ * measurements should always be the sum of the redundancies in the entire list.
+ *
+ **/
void set_redundancy(Reflection *refl, int red)
{
refl->data.redundancy = red;
}
+/**
+ * set_sum_squared_dev:
+ * @refl: A %Reflection
+ * @dev: New sum squared deviation for the reflection
+ *
+ * The sum squared deviation is used to estimate the standard errors on the
+ * intensities during 'Monte Carlo' merging. It is defined as the sum of the
+ * squared deviations between the intensities and the mean intensity from all
+ * measurements of the reflection (and probably its symmetry equivalents
+ * according to some point group).
+ *
+ **/
void set_sum_squared_dev(Reflection *refl, double dev)
{
refl->data.sum_squared_dev = dev;
}
+/**
+ * set_esd_intensity:
+ * @refl: A %Reflection
+ * @esd: New standard error for this reflection's intensity measurement
+ *
+ **/
void set_esd_intensity(Reflection *refl, double esd)
{
refl->data.esd_i = esd;
}
+/**
+ * set_ph:
+ * @refl: A %Reflection
+ * @phase: New phase for the reflection
+ *
+ **/
void set_ph(Reflection *refl, double phase)
{
refl->data.phase = phase;
}
+/**
+ * set_symmetric_indices:
+ * @refl: A %Reflection
+ * @hs: The 'h' index of the reflection
+ * @ks: The 'k' index of the reflection
+ * @ls: The 'l' index of the reflection
+ *
+ * This function sets the symmetric indices, that is, the "real" indices before
+ * squashing down to the asymmetric reciprocal unit. This may be useful if the
+ * list is indexed according to the asymmetric indices, but you still need
+ * access to the symmetric version. This happens during post-refinement.
+ *
+ **/
void set_symmetric_indices(Reflection *refl,
signed int hs, signed int ks, signed int ls)
{
@@ -600,6 +686,20 @@ static Reflection *insert_node(Reflection *refl, Reflection *new)
}
+/**
+ * add_refl:
+ * @list: A %RefList
+ * @h: The 'h' index of the reflection
+ * @k: The 'k' index of the reflection
+ * @l: The 'l' index of the reflection
+ *
+ * Adds a new reflection to @list. Note that the implementation allows there to
+ * be multiple reflections with the same indices in the list, so this function
+ * should succeed even if the given indices already feature in the list.
+ *
+ * Returns: The newly created reflection, or NULL on failure.
+ *
+ **/
Reflection *add_refl(RefList *list, signed int h, signed int k, signed int l)
{
Reflection *new;
@@ -642,6 +742,18 @@ struct _reflistiterator {
};
+/**
+ * first_refl:
+ * @list: A %RefList to iterate over
+ * @piter: Address at which to store a %RefListIterator
+ *
+ * This function sets up the state required for iteration over the entire list,
+ * and then returns the first reflection in the list. An iterator object will
+ * be created and its address stored at the location given in @piter.
+ *
+ * Returns: the first reflection in the list.
+ *
+ **/
Reflection *first_refl(RefList *list, RefListIterator **piter)
{
RefListIterator *iter;
@@ -681,6 +793,17 @@ Reflection *first_refl(RefList *list, RefListIterator **piter)
}
+/**
+ * next_refl:
+ * @refl: A reflection
+ * @iter: A %RefListIterator
+ *
+ * This function looks up the next reflection in the list that was given earlier
+ * to first_refl().
+ *
+ * Returns: the next reflection in the list, or NULL if no more.
+ *
+ **/
Reflection *next_refl(Reflection *refl, RefListIterator *iter)
{
int returned = 1;
@@ -742,12 +865,29 @@ static int recursive_count(Reflection *refl)
}
+/**
+ * num_reflections:
+ * @list: A %RefList
+ *
+ * Returns: the number of reflections in @list.
+ *
+ **/
int num_reflections(RefList *list)
{
return recursive_count(list->head);
}
+/**
+ * tree_depth:
+ * @list: A %RefList
+ *
+ * If the depth of the tree is more than about 20, access to the list will be
+ * slow. This should never happen.
+ *
+ * Returns: the depth of the RB-tree used internally to represent @list.
+ *
+ **/
int tree_depth(RefList *list)
{
return recursive_depth(list->head);
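
A short usage sketch pulling together the accessors and iterators documented
above (not part of the patch). reflist_new(), reflist_free(), get_intensity()
and get_redundancy() are assumed from the rest of reflist.h:

    #include <stdio.h>
    #include "reflist.h"

    static void reflist_demo(void)
    {
        RefList *list = reflist_new();    /* assumed creation function */
        Reflection *refl;
        RefListIterator *iter;
        int n_meas = 0;

        /* Add a reflection and fill in some of its data */
        refl = add_refl(list, 1, 2, 3);
        if ( refl != NULL ) {
            set_int(refl, 100.0);
            set_esd_intensity(refl, 10.0);
            set_redundancy(refl, 1);
        }

        /* Iterate over the whole list with first_refl()/next_refl() */
        for ( refl = first_refl(list, &iter);
              refl != NULL;
              refl = next_refl(refl, iter) )
        {
            signed int h, k, l;
            get_indices(refl, &h, &k, &l);
            n_meas += get_redundancy(refl);
            printf("%3i %3i %3i  I = %f\n", h, k, l, get_intensity(refl));
        }

        /* The total number of measurements is the sum of the redundancies */
        printf("%i reflections, %i measurements\n",
               num_reflections(list), n_meas);

        reflist_free(list);    /* assumed deletion function */
    }
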
diff --git a/src/reflist.h b/src/reflist.h
index 83a0de90..ffdef55a 100644
--- a/src/reflist.h
+++ b/src/reflist.h
@@ -19,18 +19,32 @@
/**
* RefList:
*
+ * A %RefList represents a list of Bragg reflections.
+ *
* This data structure is opaque. You must use the available accessor functions
* to read and write its contents.
- */
+ *
+ **/
typedef struct _reflist RefList;
/**
* Reflection:
*
+ * A %Reflection represents a single Bragg reflection.
+ *
* This data structure is opaque. You must use the available accessor functions
* to read and write its contents.
- */
+ *
+ **/
typedef struct _reflection Reflection;
+
+/**
+ * RefListIterator:
+ *
+ * A %RefListIterator is an opaque data type used when iterating over a
+ * %RefList.
+ *
+ **/
typedef struct _reflistiterator RefListIterator;
/* Creation/deletion */
@@ -79,7 +93,7 @@ extern Reflection *add_refl(RefList *list,
signed int h, signed int k, signed int l);
/* Iteration */
-extern Reflection *first_refl(RefList *list, RefListIterator **iterator);
+extern Reflection *first_refl(RefList *list, RefListIterator **piter);
extern Reflection *next_refl(Reflection *refl, RefListIterator *iter);
/* Misc */
diff --git a/src/thread-pool.c b/src/thread-pool.c
index e8ca4d6a..0ae26e17 100644
--- a/src/thread-pool.c
+++ b/src/thread-pool.c
@@ -124,8 +124,6 @@ signed int get_status_label()
}
-/* ---------------------------- Custom get_task() --------------------------- */
-
struct task_queue
{
pthread_mutex_t lock;
@@ -203,28 +201,29 @@ static void *task_worker(void *pargsv)
* @get_task: The function which will determine the next unassigned task
* @final: The function which will be called to clean up after a task
* @queue_args: A pointer to any data required to determine the next task
- * @max: Stop calling get_task() after starting this number of jobs
+ * @max: Stop calling get_task after starting this number of jobs
* @cpu_num: The number of CPUs in the system
* @cpu_groupsize: The group size into which the CPUs are grouped
* @cpu_offset: The CPU group number at which to start pinning threads
*
- * get_task() will be called every time a worker is idle. It returns either
+ * 'get_task' will be called every time a worker is idle. It returns either
* NULL, indicating that no further work is available, or a pointer which will
- * be passed to work().
+ * be passed to 'work'.
*
- * final() will be called once per image, and will be given both queue_args
+ * 'final' will be called once per image, and will be given both queue_args
* and the last task pointer.
*
- * get_task() and final() will be called only under lock, and so do NOT need to
- * be re-entrant or otherwise thread safe.
+ * 'get_task' and 'final' will be called only under lock, and so do NOT need to
+ * be re-entrant or otherwise thread safe. 'work', of course, needs to be
+ * thread safe.
*
- * Work will stop after 'max' tasks have been processed whether get_task()
+ * Work will stop after 'max' tasks have been processed whether get_task
* returned NULL or not. If "max" is zero, all tasks will be processed.
*
* Returns: The number of tasks completed.
**/
-int run_threads(int n_threads, void (*work)(void *, int),
- void *(*get_task)(void *), void (*final)(void *, void *),
+int run_threads(int n_threads, TPWorkFunc work,
+ TPGetTaskFunc get_task, TPFinalFunc final,
void *queue_args, int max,
int cpu_num, int cpu_groupsize, int cpu_offset)
{
diff --git a/src/thread-pool.h b/src/thread-pool.h
index eb7fb99c..a99a7ade 100644
--- a/src/thread-pool.h
+++ b/src/thread-pool.h
@@ -23,8 +23,47 @@
extern pthread_mutex_t stderr_lock;
extern signed int get_status_label(void);
-extern int run_threads(int n_threads, void (*work)(void *, int),
- void *(*get_task)(void *), void (*final)(void *, void *),
+
+/**
+ * TPGetTaskFunc:
+ * @qargs: The queue_args pointer which was given to run_threads().
+ *
+ * This function is called, non-reentrantly, to get a new work item to give to
+ * your work function. Whatever you need to generate the new work item should
+ * have been stored in @qargs before it was passed to run_threads().
+ *
+ * Returns: A pointer which will be passed to the worker function.
+ *
+ **/
+typedef void *(*TPGetTaskFunc)(void *qargs);
+
+
+/**
+ * TPWorkFunc:
+ * @work: The work item to be processed, as returned by your get_task function.
+ * @cookie: A small integral number which is guaranteed to be unique among all
+ * currently running threads.
+ *
+ * This function is called, reentrantly, for each work item.
+ *
+ **/
+typedef void (*TPWorkFunc)(void *work, int cookie);
+
+
+/**
+ * TPFinalFunc:
+ * @qargs: The queue_args pointer which was given to run_threads().
+ * @work: The pointer which was returned by your get_task function.
+ *
+ * This function is called, non-reentrantly, after each work item has been
+ * completed. A typical use might be to update some counters inside @qargs
+ * according to fields within @work which were filled by your 'work' function.
+ *
+ **/
+typedef void (*TPFinalFunc)(void *qargs, void *work);
+
+
+extern int run_threads(int n_threads, TPWorkFunc work,
+ TPGetTaskFunc get_task, TPFinalFunc final,
void *queue_args, int max,
int cpu_num, int cpu_groupsize, int cpu_offset);
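
A minimal sketch (not part of the patch) of how the three callback types fit
together with run_threads(). The queue and item structs are invented for
illustration, and the CPU topology arguments are placeholders:

    #include <stdio.h>
    #include <stdlib.h>
    #include "thread-pool.h"

    struct queue { int next; int n_tasks; };
    struct item  { int id; int result; };

    /* TPGetTaskFunc: called under lock, hands out the next work item */
    static void *get_task(void *qargs)
    {
        struct queue *q = qargs;
        struct item *it;

        if ( q->next >= q->n_tasks ) return NULL;   /* no more work */
        it = malloc(sizeof(struct item));
        if ( it == NULL ) return NULL;
        it->id = q->next++;
        return it;
    }

    /* TPWorkFunc: called re-entrantly, so it must be thread safe */
    static void work(void *wargs, int cookie)
    {
        struct item *it = wargs;
        it->result = it->id * it->id;   /* some per-task calculation */
    }

    /* TPFinalFunc: called under lock, collects the result and frees the item */
    static void final(void *qargs, void *wargs)
    {
        struct item *it = wargs;
        printf("task %i -> %i\n", it->id, it->result);
        free(it);
    }

    int main(void)
    {
        struct queue q = { 0, 8 };

        /* max = 0 means "process all tasks"; the last three arguments
         * (cpu_num, cpu_groupsize, cpu_offset) are placeholder values. */
        int n = run_threads(4, work, get_task, final, &q, 0, 1, 1, 0);
        printf("%i tasks completed\n", n);
        return 0;
    }
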
diff --git a/src/utils.c b/src/utils.c
index 3d96263e..d10f0f42 100644
--- a/src/utils.c
+++ b/src/utils.c
@@ -178,7 +178,7 @@ int poisson_noise(double expected)
* @Image:
*
* There is a simple quaternion structure in CrystFEL. At the moment, it is
- * only used when simulating patterns, as an argument to %cell_rotate to
+ * only used when simulating patterns, as an argument to cell_rotate() to
* orient the unit cell.
*/