Diffstat (limited to 'src/scaling.c')
-rw-r--r--  src/scaling.c  215
1 file changed, 190 insertions, 25 deletions
diff --git a/src/scaling.c b/src/scaling.c
index 77a64384..f4c93c66 100644
--- a/src/scaling.c
+++ b/src/scaling.c
@@ -3,11 +3,11 @@
*
* Scaling
*
- * Copyright © 2012-2015 Deutsches Elektronen-Synchrotron DESY,
+ * Copyright © 2012-2017 Deutsches Elektronen-Synchrotron DESY,
* a research centre of the Helmholtz Association.
*
* Authors:
- * 2010-2015 Thomas White <taw@physics.org>
+ * 2010-2017 Thomas White <taw@physics.org>
*
* This file is part of CrystFEL.
*
@@ -38,16 +38,14 @@
#include <gsl/gsl_linalg.h>
#include <gsl/gsl_eigen.h>
#include <gsl/gsl_blas.h>
+#include <gsl/gsl_fit.h>
#include "merge.h"
#include "post-refinement.h"
#include "symmetry.h"
#include "cell.h"
#include "cell-utils.h"
-
-
-/* Maximum number of iterations of NLSq to do for each image per macrocycle. */
-#define MAX_CYCLES (10)
+#include "scaling.h"
/* Apply the given shift to the 'k'th parameter of 'image'. */
@@ -64,9 +62,9 @@ static void apply_shift(Crystal *cr, int k, double shift)
break;
case GPARAM_OSF :
- t = crystal_get_osf(cr);
+ t = -log(crystal_get_osf(cr));
t += shift;
- crystal_set_osf(cr, t);
+ crystal_set_osf(cr, exp(-t));
break;
default :
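
In this hunk the crystal's scale factor is now kept in linear form, and the refined parameter becomes t = -log(OSF). A minimal sketch of the round trip, assuming crystal_get_osf()/crystal_set_osf() hold the linear factor G:

	/* Sketch only: the OSF shift is applied in -log space */
	double t = -log(crystal_get_osf(cr));  /* linear G -> refined parameter t */
	t += shift;                            /* apply the computed shift        */
	crystal_set_osf(cr, exp(-t));          /* back to the linear factor G     */
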
@@ -78,8 +76,7 @@ static void apply_shift(Crystal *cr, int k, double shift)
/* Perform one cycle of scaling of 'cr' against 'full' */
-static double scale_iterate(Crystal *cr, const RefList *full,
- PartialityModel pmodel, int *nr)
+static double scale_iterate(Crystal *cr, const RefList *full, int *nr)
{
gsl_matrix *M;
gsl_vector *v;
@@ -182,7 +179,7 @@ static double scale_iterate(Crystal *cr, const RefList *full,
}
- fx = G + log(p) - log(L) - B*s*s + log(I_full);
+ fx = -log(G) + log(p) - log(L) - B*s*s + log(I_full);
delta_I = log(I_partial) - fx;
v_c = w * delta_I * gradients[k];
v_curr = gsl_vector_get(v, k);
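
With G stored linearly, the target evaluated here in scale_iterate(), and again in log_residual() below, corresponds to the model (written with the variable names from the code; B and s are presumably the crystal B factor and the resolution):

	log(I_partial) ≈ -log(G) + log(p) - log(L) - B*s^2 + log(I_full)

or equivalently

	I_partial ≈ (p / (G*L)) * exp(-B*s^2) * I_full

so that delta_I = log(I_partial) - fx is the per-reflection disagreement in log space.
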
@@ -271,7 +268,7 @@ double log_residual(Crystal *cr, const RefList *full, int free,
if ( I_full <= 0 ) continue; /* Because log */
if ( p <= 0.0 ) continue; /* Because of log */
- fx = G + log(p) - log(L) - B*s*s + log(I_full);
+ fx = -log(G) + log(p) - log(L) - B*s*s + log(I_full);
dc = log(I_partial) - fx;
w = 1.0;
dev += w*dc*dc;
@@ -290,8 +287,7 @@ double log_residual(Crystal *cr, const RefList *full, int free,
}
-static void do_scale_refine(Crystal *cr, const RefList *full,
- PartialityModel pmodel, int *nr)
+static void do_scale_refine(Crystal *cr, const RefList *full, int *nr)
{
int i, done;
double old_dev;
@@ -304,7 +300,7 @@ static void do_scale_refine(Crystal *cr, const RefList *full,
double dev;
- scale_iterate(cr, full, pmodel, nr);
+ scale_iterate(cr, full, nr);
dev = log_residual(cr, full, 0, 0, NULL);
if ( fabs(dev - old_dev) < dev*0.01 ) done = 1;
@@ -312,7 +308,7 @@ static void do_scale_refine(Crystal *cr, const RefList *full,
i++;
old_dev = dev;
- } while ( i < MAX_CYCLES && !done );
+ } while ( i < 10 && !done );
}
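
Assembled from the fragments of this hunk, the per-crystal refinement loop now reads roughly as follows (a reconstruction; the initialisation of i, done and old_dev lies outside the lines shown and is assumed):

	/* Reconstruction sketch, not verbatim from the patch */
	int i, done;
	double old_dev;

	old_dev = log_residual(cr, full, 0, 0, NULL);  /* assumed initial value */
	i = 0;  done = 0;
	do {
		double dev;
		scale_iterate(cr, full, nr);
		dev = log_residual(cr, full, 0, 0, NULL);
		if ( fabs(dev - old_dev) < dev*0.01 ) done = 1;  /* ~1% relative change */
		i++;
		old_dev = dev;
	} while ( i < 10 && !done );  /* formerly MAX_CYCLES */
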
@@ -320,7 +316,6 @@ struct scale_args
{
RefList *full;
Crystal *crystal;
- PartialityModel pmodel;
int n_reflections;
};
@@ -339,8 +334,7 @@ struct queue_args
static void scale_crystal(void *task, int id)
{
struct scale_args *pargs = task;
- do_scale_refine(pargs->crystal, pargs->full, pargs->pmodel,
- &pargs->n_reflections);
+ do_scale_refine(pargs->crystal, pargs->full, &pargs->n_reflections);
}
@@ -381,14 +375,12 @@ static double total_log_r(Crystal **crystals, int n_crystals, RefList *full,
int n = 0;
for ( i=0; i<n_crystals; i++ ) {
-
double r;
if ( crystal_get_user_flag(crystals[i]) ) continue;
r = log_residual(crystals[i], full, 0, NULL, NULL);
if ( isnan(r) ) continue;
total += r;
n++;
-
}
if ( ninc != NULL ) *ninc = n;
@@ -397,8 +389,7 @@ static double total_log_r(Crystal **crystals, int n_crystals, RefList *full,
/* Perform iterative scaling, all the way to convergence */
-void scale_all(Crystal **crystals, int n_crystals, int nthreads,
- PartialityModel pmodel)
+void scale_all(Crystal **crystals, int n_crystals, int nthreads)
{
struct scale_args task_defaults;
struct queue_args qargs;
@@ -406,7 +397,6 @@ void scale_all(Crystal **crystals, int n_crystals, int nthreads,
int niter = 0;
task_defaults.crystal = NULL;
- task_defaults.pmodel = pmodel;
qargs.task_defaults = task_defaults;
qargs.n_crystals = n_crystals;
@@ -422,7 +412,7 @@ void scale_all(Crystal **crystals, int n_crystals, int nthreads,
double bef_res;
full = merge_intensities(crystals, n_crystals, nthreads,
- pmodel, 2, INFINITY, 0);
+ 2, INFINITY, 0);
old_res = new_res;
bef_res = total_log_r(crystals, n_crystals, full, NULL);
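
Around these hunks, scale_all() repeats a merge-then-scale macrocycle until the total log-residual stops improving, bailing out with the "Too many iterations" error below if it never does. A sketch of one macrocycle, with the code between the hunks assumed:

	/* One macrocycle of scale_all(): sketch only; parts marked "assumed"
	 * are not shown in the patch */
	full = merge_intensities(crystals, n_crystals, nthreads, 2, INFINITY, 0);
	old_res = new_res;
	bef_res = total_log_r(crystals, n_crystals, full, NULL);
	qargs.task_defaults.full = full;                          /* assumed */
	run_threads(nthreads, scale_crystal, get_crystal, done_crystal,
	            &qargs, n_crystals, 0, 0, 0);                 /* assumed callbacks */
	new_res = total_log_r(crystals, n_crystals, full, NULL);  /* assumed */
	reflist_free(full);                                       /* assumed */
	niter++;
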
@@ -456,3 +446,178 @@ void scale_all(Crystal **crystals, int n_crystals, int nthreads,
ERROR("Too many iterations - giving up!\n");
}
}
+
+
+static void scale_crystal_linear(void *task, int id)
+{
+ struct scale_args *pargs = task;
+ int r;
+ double G;
+
+ /* Simple linear scaling against the reference */
+ r = linear_scale(pargs->full, crystal_get_reflections(pargs->crystal), &G, 0);
+ if ( r == 0 ) {
+ crystal_set_osf(pargs->crystal, G);
+ } else {
+ crystal_set_user_flag(pargs->crystal, PRFLAG_SCALEBAD);
+ }
+}
+
+
+/* Calculates G, by which list2 should be multiplied to fit list1 */
+int linear_scale(const RefList *list1, const RefList *list2, double *G,
+ int complain_loudly)
+{
+ const Reflection *refl1;
+ const Reflection *refl2;
+ RefListIterator *iter;
+ int max_n = 256;
+ int n = 0;
+ double *x;
+ double *y;
+ double *w;
+ int r;
+ double cov11;
+ double sumsq;
+ int n_esd1 = 0;
+ int n_esd2 = 0;
+ int n_ih1 = 0;
+ int n_ih2 = 0;
+ int n_nan1 = 0;
+ int n_nan2 = 0;
+ int n_inf1 = 0;
+ int n_inf2 = 0;
+ int n_part = 0;
+ int n_nom = 0;
+
+ x = malloc(max_n*sizeof(double));
+ w = malloc(max_n*sizeof(double));
+ y = malloc(max_n*sizeof(double));
+ if ( (x==NULL) || (y==NULL) || (w==NULL) ) {
+ ERROR("Failed to allocate memory for scaling.\n");
+ return 1;
+ }
+
+ int nb = 0;
+ for ( refl2 = first_refl_const(list2, &iter);
+ refl2 != NULL;
+ refl2 = next_refl_const(refl2, iter) )
+ {
+ signed int h, k, l;
+ double Ih1, Ih2;
+ double esd1, esd2;
+ nb++;
+
+ get_indices(refl2, &h, &k, &l);
+ refl1 = find_refl(list1, h, k, l);
+ if ( refl1 == NULL ) {
+ n_nom++;
+ continue;
+ }
+
+ Ih1 = get_intensity(refl1);
+ Ih2 = get_intensity(refl2);
+ esd1 = get_esd_intensity(refl1);
+ esd2 = get_esd_intensity(refl2);
+
+ /* Problem cases in approximate descending order of severity */
+ if ( isnan(Ih1) ) { n_nan1++; continue; }
+ if ( isinf(Ih1) ) { n_inf1++; continue; }
+ if ( isnan(Ih2) ) { n_nan2++; continue; }
+ if ( isinf(Ih2) ) { n_inf2++; continue; }
+ if ( get_partiality(refl2) < 0.3 ) { n_part++; continue; }
+ //if ( Ih1 <= 0.0 ) { n_ih1++; continue; }
+ if ( Ih2 <= 0.0 ) { n_ih2++; continue; }
+ //if ( Ih1 <= 3.0*esd1 ) { n_esd1++; continue; }
+ if ( Ih2 <= 3.0*esd2 ) { n_esd2++; continue; }
+ //if ( get_redundancy(refl1) < 2 ) continue;
+
+ if ( n == max_n ) {
+ max_n *= 2;
+ x = realloc(x, max_n*sizeof(double));
+ y = realloc(y, max_n*sizeof(double));
+ w = realloc(w, max_n*sizeof(double));
+ if ( (x==NULL) || (y==NULL) || (w==NULL) ) {
+ ERROR("Failed to allocate memory for scaling.\n");
+ return 1;
+ }
+ }
+
+ x[n] = Ih2 / get_partiality(refl2);
+ y[n] = Ih1;
+ w[n] = get_partiality(refl2);
+ n++;
+
+ }
+
+ if ( n < 2 ) {
+ if ( complain_loudly ) {
+ ERROR("Not enough reflections for scaling (had %i, but %i remain)\n", nb, n);
+ if ( n_esd1 ) ERROR("%i reference reflection esd\n", n_esd1);
+ if ( n_esd2 ) ERROR("%i subject reflection esd\n", n_esd2);
+ if ( n_ih1 ) ERROR("%i reference reflection intensity\n", n_ih1);
+ if ( n_ih2 ) ERROR("%i subject reflection intensity\n", n_ih2);
+ if ( n_nan1 ) ERROR("%i reference reflection nan\n", n_nan1);
+ if ( n_nan2 ) ERROR("%i subject reflection nan\n", n_nan2);
+ if ( n_inf1 ) ERROR("%i reference reflection inf\n", n_inf1);
+ if ( n_inf2 ) ERROR("%i subject reflection inf\n", n_inf2);
+ if ( n_part ) ERROR("%i subject reflection partiality\n", n_part);
+ if ( n_nom ) ERROR("%i no match in reference list\n", n_nom);
+ }
+ *G = 1.0;
+ return 1;
+ }
+
+ r = gsl_fit_wmul(x, 1, w, 1, y, 1, n, G, &cov11, &sumsq);
+
+ if ( r ) {
+ ERROR("Scaling failed.\n");
+ return 1;
+ }
+
+ if ( isnan(*G) ) {
+
+ if ( complain_loudly ) {
+ ERROR("Scaling gave NaN (%i pairs)\n", n);
+ if ( n < 10 ) {
+ int i;
+ for ( i=0; i<n; i++ ) {
+ STATUS("%i %e %e %e\n", i, x[i], y[i], w[i]);
+ }
+ }
+ }
+
+ *G = 1.0;
+ return 1;
+ }
+
+ free(x);
+ free(y);
+ free(w);
+
+ return 0;
+}
+
+
+void scale_all_to_reference(Crystal **crystals, int n_crystals,
+ RefList *reference, int nthreads)
+{
+ struct scale_args task_defaults;
+ struct queue_args qargs;
+
+ task_defaults.crystal = NULL;
+
+ qargs.task_defaults = task_defaults;
+ qargs.n_crystals = n_crystals;
+ qargs.crystals = crystals;
+
+ /* Don't have threads which are doing nothing */
+ if ( n_crystals < nthreads ) nthreads = n_crystals;
+
+ qargs.task_defaults.full = reference;
+ qargs.n_started = 0;
+ qargs.n_done = 0;
+ qargs.n_reflections = 0;
+ run_threads(nthreads, scale_crystal_linear, get_crystal, done_crystal,
+ &qargs, n_crystals, 0, 0, 0);
+}
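

The core of linear_scale() above is a weighted least-squares fit of y = G*x through the origin, provided by GSL's gsl_fit_wmul(). A self-contained sketch of that call, with made-up numbers purely for illustration:

	#include <stdio.h>
	#include <gsl/gsl_fit.h>

	int main(void)
	{
		/* Subject intensities x, reference intensities y (roughly y = 2*x),
		 * and weights w (a larger weight means a more trusted observation) */
		double x[4] = { 1.0, 2.0, 3.0, 4.0 };
		double y[4] = { 2.1, 3.9, 6.2, 7.8 };
		double w[4] = { 1.0, 1.0, 0.5, 0.5 };
		double G, cov11, sumsq;

		/* Fit y = G*x with no intercept; the '1' arguments are array strides */
		if ( gsl_fit_wmul(x, 1, w, 1, y, 1, 4, &G, &cov11, &sumsq) ) {
			fprintf(stderr, "Fit failed\n");
			return 1;
		}

		printf("G = %.3f (variance %.2e, weighted sum of squares %.2e)\n",
		       G, cov11, sumsq);
		return 0;
	}

In the patch itself, x is the subject intensity divided by its partiality, y is the reference intensity and the weight is the partiality, so weakly-recorded partial reflections pull on the fit less.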
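
scale_all_to_reference() reuses the same scale_args/queue_args plumbing as scale_all(), but fits every crystal linearly against a fixed reference list instead of a freshly merged one. A hypothetical usage sketch (read_reflections() from reflist-utils is assumed here as the source of the reference data):

	/* Hypothetical caller: scale all crystals against an external reference */
	RefList *reference = read_reflections("reference.hkl");
	if ( reference != NULL ) {
		scale_all_to_reference(crystals, n_crystals, reference, nthreads);
		reflist_free(reference);
	}
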