-rw-r--r--   libcrystfel/src/geometry.c   23
-rw-r--r--   src/hrs-scaling.c            101
-rw-r--r--   src/hrs-scaling.h            9
-rw-r--r--   src/partialator.c            17
4 files changed, 116 insertions, 34 deletions
diff --git a/libcrystfel/src/geometry.c b/libcrystfel/src/geometry.c
index f457fc4f..cdc62ab5 100644
--- a/libcrystfel/src/geometry.c
+++ b/libcrystfel/src/geometry.c
@@ -362,6 +362,21 @@ RefList *select_intersections(struct image *image, Crystal *cryst)
}
+static void set_unity_partialities(Crystal *cryst)
+{
+ Reflection *refl;
+ RefListIterator *iter;
+
+ for ( refl = first_refl(crystal_get_reflections(cryst), &iter);
+ refl != NULL;
+ refl = next_refl(refl, iter) )
+ {
+ set_partiality(refl, 1.0);
+ set_lorentz(refl, 1.0);
+ }
+}
+
+
/* Calculate partialities and apply them to the image's reflections */
void update_partialities(Crystal *cryst, PartialityModel pmodel)
{
@@ -373,6 +388,14 @@ void update_partialities(Crystal *cryst, PartialityModel pmodel)
double csx, csy, csz;
struct image *image = crystal_get_image(cryst);
+ if ( pmodel == PMODEL_UNITY ) {
+ /* It isn't strictly necessary to set the partialities to 1,
+ * because the scaling stuff will just use a correction factor of
+ * 1 anyway. This is just to help things make sense. */
+ set_unity_partialities(cryst);
+ return;
+ }
+
cell_get_reciprocal(crystal_get_cell(cryst), &asx, &asy, &asz,
&bsx, &bsy, &bsz, &csx, &csy, &csz);
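
With the unity model, update_partialities() now returns immediately after set_unity_partialities(), so every reflection carries partiality and Lorentz factors of exactly 1. A minimal usage sketch of the behaviour this hunk introduces, assuming the usual libcrystfel headers (crystal.h, reflist.h, geometry.h); the assert-based check is illustrative only and not part of this commit:

#include <assert.h>

static void check_unity(Crystal *cryst)
{
	Reflection *refl;
	RefListIterator *iter;

	/* Unity model: no reciprocal-space calculation is performed */
	update_partialities(cryst, PMODEL_UNITY);

	for ( refl = first_refl(crystal_get_reflections(cryst), &iter);
	      refl != NULL;
	      refl = next_refl(refl, iter) )
	{
		/* Both factors are forced to 1, so the later division by the
		 * correction factor leaves the raw intensity untouched. */
		assert(get_partiality(refl) == 1.0);
		assert(get_lorentz(refl) == 1.0);
	}
}
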
diff --git a/src/hrs-scaling.c b/src/hrs-scaling.c
index f60dfe25..3a51e6d9 100644
--- a/src/hrs-scaling.c
+++ b/src/hrs-scaling.c
@@ -60,6 +60,7 @@ struct scale_queue_args
Crystal **crystals;
int n_started;
double max_shift;
+ PartialityModel pmodel;
};
@@ -68,6 +69,7 @@ struct scale_worker_args
Crystal *crystal;
double shift;
RefList *reference;
+ PartialityModel pmodel;
};
@@ -78,6 +80,7 @@ static void *create_scale_job(void *vqargs)
wargs = malloc(sizeof(struct scale_worker_args));
wargs->reference = qargs->reference;
+ wargs->pmodel = qargs->pmodel;
wargs->crystal = qargs->crystals[qargs->n_started++];
@@ -109,7 +112,7 @@ static void run_scale_job(void *vwargs, int cookie)
signed int h, k, l;
double Ih, Ihl, esd;
Reflection *r;
- double L, p;
+ double corr;
if ( !get_scalable(refl) ) continue;
@@ -126,11 +129,21 @@ static void run_scale_job(void *vwargs, int cookie)
Ih = get_intensity(r);
}
- /* If you change this, be sure to change run_merge_job() too */
- p = get_partiality(refl);
- L = get_lorentz(refl);
- Ihl = get_intensity(refl) / (L*p);
- esd = get_esd_intensity(refl) / (L*p);
+ /* If you change this, be sure to also change
+ * run_merge_job() and run_esd_job(). */
+ switch ( wargs->pmodel ) {
+
+ case PMODEL_UNITY :
+ corr = 1.0;
+ break;
+
+ case PMODEL_SPHERE :
+ corr = get_partiality(refl) * get_lorentz(refl);
+ break;
+
+ }
+ Ihl = get_intensity(refl) / corr;
+ esd = get_esd_intensity(refl) / corr;
num += Ih * (Ihl/osf) / pow(esd/osf, 2.0);
den += pow(Ih, 2.0)/pow(esd/osf, 2.0);
@@ -160,7 +173,7 @@ static void finalise_scale_job(void *vqargs, void *vwargs)
static double iterate_scale(Crystal **crystals, int n, RefList *reference,
- int n_threads)
+ int n_threads, PartialityModel pmodel)
{
struct scale_queue_args qargs;
@@ -170,6 +183,7 @@ static double iterate_scale(Crystal **crystals, int n, RefList *reference,
qargs.n_started = 0;
qargs.crystals = crystals;
qargs.max_shift = 0.0;
+ qargs.pmodel = pmodel;
run_threads(n_threads, run_scale_job, create_scale_job,
finalise_scale_job, &qargs, n, 0, 0, 0);
@@ -184,6 +198,7 @@ struct merge_queue_args
pthread_mutex_t full_lock;
Crystal **crystals;
int n_started;
+ PartialityModel pmodel;
};
@@ -192,6 +207,7 @@ struct merge_worker_args
Crystal *crystal;
RefList *full;
pthread_mutex_t *full_lock;
+ PartialityModel pmodel;
};
@@ -203,6 +219,7 @@ static void *create_merge_job(void *vqargs)
wargs = malloc(sizeof(struct merge_worker_args));
wargs->full = qargs->full;
wargs->full_lock = &qargs->full_lock;
+ wargs->pmodel = qargs->pmodel;
wargs->crystal = qargs->crystals[qargs->n_started++];
@@ -231,7 +248,7 @@ static void run_merge_job(void *vwargs, int cookie)
signed int h, k, l;
double num, den;
int red;
- double Ihl, esd, L, p;
+ double Ihl, esd, corr;
if ( !get_scalable(refl) ) continue;
@@ -254,11 +271,22 @@ static void run_merge_job(void *vwargs, int cookie)
red = get_redundancy(f);
}
- /* If you change this, be sure to change run_scale_job() too */
- p = get_partiality(refl);
- L = get_lorentz(refl);
- Ihl = get_intensity(refl) / (L*p);
- esd = get_esd_intensity(refl) / (L*p);
+ /* If you change this, be sure to also change
+ * run_scale_job() and run_esd_job(). */
+ switch ( wargs->pmodel ) {
+
+ case PMODEL_UNITY :
+ corr = 1.0;
+ break;
+
+ case PMODEL_SPHERE :
+ corr = get_partiality(refl) * get_lorentz(refl);
+ break;
+
+ }
+
+ Ihl = get_intensity(refl) / corr;
+ esd = get_esd_intensity(refl) / corr;
num += (Ihl/G) / pow(esd/G, 2.0);
den += 1.0 / pow(esd/G, 2.0);
@@ -278,7 +306,8 @@ static void finalise_merge_job(void *vqargs, void *vwargs)
}
-static RefList *lsq_intensities(Crystal **crystals, int n, int n_threads)
+static RefList *lsq_intensities(Crystal **crystals, int n, int n_threads,
+ PartialityModel pmodel)
{
RefList *full;
struct merge_queue_args qargs;
@@ -290,6 +319,7 @@ static RefList *lsq_intensities(Crystal **crystals, int n, int n_threads)
qargs.full = full;
qargs.n_started = 0;
qargs.crystals = crystals;
+ qargs.pmodel = pmodel;
pthread_mutex_init(&qargs.full_lock, NULL);
run_threads(n_threads, run_merge_job, create_merge_job,
@@ -316,6 +346,7 @@ struct esd_queue_args
RefList *full;
Crystal **crystals;
int n_started;
+ PartialityModel pmodel;
};
@@ -323,6 +354,7 @@ struct esd_worker_args
{
Crystal *crystal;
RefList *full;
+ PartialityModel pmodel;
};
@@ -333,6 +365,7 @@ static void *create_esd_job(void *vqargs)
wargs = malloc(sizeof(struct esd_worker_args));
wargs->full = qargs->full;
+ wargs->pmodel = qargs->pmodel;
wargs->crystal = qargs->crystals[qargs->n_started++];
@@ -360,7 +393,7 @@ static void run_esd_job(void *vwargs, int cookie)
Reflection *f;
signed int h, k, l;
double num;
- double Ihl, Ih;
+ double Ihl, Ih, corr;
if ( !get_scalable(refl) ) continue;
@@ -372,8 +405,23 @@ static void run_esd_job(void *vwargs, int cookie)
num = get_temp1(f);
+ /* If you change this, be sure to also change
+ * run_scale_job() and run_merge_job(). */
+ switch ( wargs->pmodel ) {
+
+ case PMODEL_UNITY :
+ corr = 1.0;
+ break;
+
+ case PMODEL_SPHERE :
+ corr = get_partiality(refl) * get_lorentz(refl);
+ break;
+
+ }
+
Ih = get_intensity(f);
- Ihl = get_intensity(refl) / (get_partiality(refl) * G);
+ Ihl = get_intensity(refl) / (corr * G);
num += pow(Ihl - Ih, 2.0);
@@ -390,7 +438,7 @@ static void finalise_esd_job(void *vqargs, void *vwargs)
static void calculate_esds(Crystal **crystals, int n, RefList *full,
- int n_threads, int min_red)
+ int n_threads, int min_red, PartialityModel pmodel)
{
struct esd_queue_args qargs;
Reflection *refl;
@@ -399,6 +447,7 @@ static void calculate_esds(Crystal **crystals, int n, RefList *full,
qargs.full = full;
qargs.n_started = 0;
qargs.crystals = crystals;
+ qargs.pmodel = pmodel;
for ( refl = first_refl(full, &iter);
refl != NULL;
@@ -430,7 +479,7 @@ static void calculate_esds(Crystal **crystals, int n, RefList *full,
/* Scale the stack of images */
RefList *scale_intensities(Crystal **crystals, int n, RefList *gref,
- int n_threads, int noscale)
+ int n_threads, int noscale, PartialityModel pmodel)
{
int i;
double max_corr;
@@ -440,14 +489,15 @@ RefList *scale_intensities(Crystal **crystals, int n, RefList *gref,
for ( i=0; i<n; i++ ) crystal_set_osf(crystals[i], 1.0);
if ( noscale ) {
- full = lsq_intensities(crystals, n, n_threads);
- calculate_esds(crystals, n, full, n_threads, min_redundancy);
+ full = lsq_intensities(crystals, n, n_threads, pmodel);
+ calculate_esds(crystals, n, full, n_threads, min_redundancy,
+ pmodel);
return full;
}
/* No reference -> create an initial list to refine against */
if ( gref == NULL ) {
- full = lsq_intensities(crystals, n, n_threads);
+ full = lsq_intensities(crystals, n, n_threads, pmodel);
}
/* Iterate */
@@ -463,14 +513,15 @@ RefList *scale_intensities(Crystal **crystals, int n, RefList *gref,
reference = full;
}
- max_corr = iterate_scale(crystals, n, reference, n_threads);
+ max_corr = iterate_scale(crystals, n, reference, n_threads,
+ pmodel);
//STATUS("Scaling iteration %2i: max correction = %5.2f\n",
// i+1, max_corr);
/* No reference -> generate list for next iteration */
if ( gref == NULL ) {
reflist_free(full);
- full = lsq_intensities(crystals, n, n_threads);
+ full = lsq_intensities(crystals, n, n_threads, pmodel);
}
//show_scale_factors(images, n);
@@ -480,10 +531,10 @@ RefList *scale_intensities(Crystal **crystals, int n, RefList *gref,
} while ( (max_corr > 0.01) && (i < MAX_CYCLES) );
if ( gref != NULL ) {
- full = lsq_intensities(crystals, n, n_threads);
+ full = lsq_intensities(crystals, n, n_threads, pmodel);
} /* else we already did it */
- calculate_esds(crystals, n, full, n_threads, min_redundancy);
+ calculate_esds(crystals, n, full, n_threads, min_redundancy, pmodel);
return full;
}
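
The same model switch now appears in run_scale_job(), run_merge_job() and run_esd_job(), which is why each copy carries a reminder comment pointing at the other two. A possible follow-up, not part of this commit and with a hypothetical helper name, would be to derive the correction factor in one shared function:

/* Hypothetical shared helper: one place to compute the per-reflection
 * correction factor from the partiality model. */
static double partiality_correction(Reflection *refl, PartialityModel pmodel)
{
	switch ( pmodel ) {

		case PMODEL_UNITY :
		return 1.0;

		case PMODEL_SPHERE :
		return get_partiality(refl) * get_lorentz(refl);

	}

	return 1.0;  /* unknown model: apply no correction */
}

Each job body would then reduce to Ihl = get_intensity(refl) / partiality_correction(refl, wargs->pmodel); and the cross-reference comments could go away.
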
diff --git a/src/hrs-scaling.h b/src/hrs-scaling.h
index 80940347..8ae12380 100644
--- a/src/hrs-scaling.h
+++ b/src/hrs-scaling.h
@@ -3,11 +3,11 @@
*
* Intensity scaling using generalised HRS target function
*
- * Copyright © 2012 Deutsches Elektronen-Synchrotron DESY,
- * a research centre of the Helmholtz Association.
+ * Copyright © 2012-2013 Deutsches Elektronen-Synchrotron DESY,
+ * a research centre of the Helmholtz Association.
*
* Authors:
- * 2010-2012 Thomas White <taw@physics.org>
+ * 2010-2013 Thomas White <taw@physics.org>
*
* This file is part of CrystFEL.
*
@@ -37,10 +37,11 @@
#include "crystal.h"
#include "reflist.h"
+#include "geometry.h"
extern RefList *scale_intensities(Crystal **crystals, int n,
RefList *reference, int n_threads,
- int noscale);
+ int noscale, PartialityModel pmodel);
#endif /* HRS_SCALING_H */
diff --git a/src/partialator.c b/src/partialator.c
index 079e078e..bdacd62b 100644
--- a/src/partialator.c
+++ b/src/partialator.c
@@ -79,6 +79,7 @@ static void show_help(const char *s)
" -r, --reference=<file> Refine images against reflections in <file>,\n"
" instead of taking the mean of the intensity\n"
" estimates.\n"
+" -m, --model=<model> Specify partiality model.\n"
"\n"
" -j <n> Run <n> analyses in parallel.\n");
}
@@ -140,13 +141,18 @@ static void done_image(void *vqargs, void *task)
static void refine_all(Crystal **crystals, int n_crystals,
struct detector *det,
- RefList *full, int nthreads)
+ RefList *full, int nthreads, PartialityModel pmodel)
{
struct refine_args task_defaults;
struct queue_args qargs;
+ /* If the partiality model is "p=1", this refinement is really, really
+ * easy... */
+ if ( pmodel == PMODEL_UNITY ) return;
+
task_defaults.full = full;
task_defaults.crystal = NULL;
+ task_defaults.pmodel = pmodel;
qargs.task_defaults = task_defaults;
qargs.n_started = 0;
@@ -329,7 +335,8 @@ int main(int argc, char *argv[])
{"iterations", 1, NULL, 'n'},
{"no-scale", 0, &noscale, 1},
{"reference", 1, NULL, 'r'},
- {"partiality", 1, NULL, 'm'},
+ {"model", 1, NULL, 'm'},
+
{0, 0, NULL, 0}
};
@@ -574,7 +581,7 @@ int main(int argc, char *argv[])
STATUS("\nPerforming initial scaling.\n");
if ( noscale ) STATUS("Scale factors fixed at 1.\n");
full = scale_intensities(crystals, n_crystals, reference,
- nthreads, noscale);
+ nthreads, noscale, pmodel);
sr = sr_titlepage(crystals, n_crystals, "scaling-report.pdf",
infile, cmdline);
@@ -597,7 +604,7 @@ int main(int argc, char *argv[])
/* Refine the geometry of all patterns to get the best fit */
select_reflections_for_refinement(crystals, n_crystals,
comp, have_reference);
- refine_all(crystals, n_crystals, det, comp, nthreads);
+ refine_all(crystals, n_crystals, det, comp, nthreads, pmodel);
nobs = 0;
for ( j=0; j<n_crystals; j++ ) {
@@ -612,7 +619,7 @@ int main(int argc, char *argv[])
/* Re-estimate all the full intensities */
reflist_free(full);
full = scale_intensities(crystals, n_crystals,
- reference, nthreads, noscale);
+ reference, nthreads, noscale, pmodel);
sr_iteration(sr, i+1, crystals, n_crystals, full);
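
The new -m/--model option is declared in the long_options table above, but the code that turns the argument string into a PartialityModel value lies outside this diff. A hedged sketch of what that mapping might look like; the accepted strings "unity" and "sphere", the fallback to the sphere model, and the helper name are all assumptions, and ERROR() is CrystFEL's usual error-reporting macro:

#include <string.h>

/* Hypothetical helper: map the -m/--model argument onto the enum.
 * The accepted strings and the default are assumed, not taken from
 * this diff. */
static PartialityModel parse_pmodel(const char *arg)
{
	if ( strcmp(arg, "unity") == 0 ) return PMODEL_UNITY;
	if ( strcmp(arg, "sphere") == 0 ) return PMODEL_SPHERE;
	ERROR("Unknown partiality model '%s'.\n", arg);
	return PMODEL_SPHERE;  /* assumed fallback */
}
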