-rw-r--r--  libcrystfel/src/geometry.c |   1
-rw-r--r--  src/hrs-scaling.c          |  25
-rw-r--r--  src/partialator.c          | 156
-rw-r--r--  src/post-refinement.c      |  19
-rw-r--r--  src/scaling-report.c       |   8
5 files changed, 34 insertions(+), 175 deletions(-)
diff --git a/libcrystfel/src/geometry.c b/libcrystfel/src/geometry.c
index 357f64f8..6ccaf3d8 100644
--- a/libcrystfel/src/geometry.c
+++ b/libcrystfel/src/geometry.c
@@ -456,6 +456,7 @@ void update_partialities_2(Crystal *cryst, PartialityModel pmodel,
if ( get_redundancy(refl) != 0 ) {
(*n_lost)++;
+ set_partiality(refl, 0.0);
set_redundancy(refl, 0);
}
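
A minimal sketch of what the added set_partiality() call buys downstream (illustrative only, not part of the patch; it assumes the reflist.h accessors already used in this diff): a reflection lost on re-prediction now carries partiality 0.0 as well as redundancy 0, so the partiality thresholds introduced in the files below reject it without any separate flag.

#include "reflist.h"

/* Sketch, not part of the patch: with update_partialities_2() forcing
 * the partiality of lost reflections to zero, a plain threshold test
 * is enough to exclude them downstream. */
static int usable_for_scaling(Reflection *refl, double min_part)
{
        return get_partiality(refl) >= min_part;
}
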
diff --git a/src/hrs-scaling.c b/src/hrs-scaling.c
index 912993a5..2fef0925 100644
--- a/src/hrs-scaling.c
+++ b/src/hrs-scaling.c
@@ -47,6 +47,12 @@
#include "reflist.h"
+/* Minimum partiality of a reflection for it to be used for scaling */
+#define MIN_PART_SCALE (0.05)
+
+/* Minimum partiality of a reflection for it to be merged */
+#define MIN_PART_MERGE (0.05)
+
/* Maximum number of iterations of scaling per macrocycle. */
#define MAX_CYCLES (10)
@@ -105,19 +111,14 @@ static void run_scale_job(void *vwargs, int cookie)
double Ih, Ihl, corr;
Reflection *r;
- if ( !get_scalable(refl) ) continue;
+ if ( get_partiality(refl) < MIN_PART_SCALE ) continue;
/* Look up by asymmetric indices */
get_indices(refl, &h, &k, &l);
r = find_refl(reference, h, k, l);
- if ( r == NULL ) {
- ERROR("%3i %3i %3i isn't in the "
- "reference list, so why is it "
- "marked as scalable?\n", h, k, l);
- Ih = 0.0;
- } else {
- Ih = get_intensity(r);
- }
+ if ( r == NULL ) continue;
+
+ Ih = get_intensity(r);
corr = get_partiality(refl) * get_lorentz(refl);
@@ -228,7 +229,7 @@ static void run_merge_job(void *vwargs, int cookie)
int red;
double Ihl, corr;
- if ( !get_scalable(refl) ) continue;
+ if ( get_partiality(refl) < MIN_PART_MERGE ) continue;
get_indices(refl, &h, &k, &l);
pthread_rwlock_rdlock(wargs->full_lock);
@@ -385,7 +386,7 @@ static void run_esd_job(void *vwargs, int cookie)
double num;
double Ihl, Ih, corr;
- if ( !get_scalable(refl) ) continue;
+ if ( get_partiality(refl) < MIN_PART_MERGE ) continue;
get_indices(refl, &h, &k, &l);
f = find_refl(full, h, k, l);
@@ -482,8 +483,6 @@ static void reject_outliers(struct osfcheck *och, int n, Crystal **crystals)
crystal_set_user_flag(crystals[och[i].n], 1);
crystal_set_osf(crystals[och[i].n],
och[i].old_osf);
- } else {
- crystal_set_user_flag(crystals[och[i].n], 0);
}
}
}
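
To make the new selection logic explicit, here is a condensed sketch of the filter now applied in run_scale_job(). The helper name is hypothetical and the body is not code from the patch; it only restates the two checks visible in the hunks above: reflections are kept or skipped purely on partiality and on presence in the reference list, with the old ERROR path gone.

#include "reflist.h"

/* Sketch, not part of the patch: count the reflections that would be
 * used for scaling under the new criteria.  MIN_PART_SCALE matches the
 * definition added above. */
static int count_scalable(RefList *reflections, RefList *reference)
{
        Reflection *refl;
        RefListIterator *iter;
        int n = 0;

        for ( refl = first_refl(reflections, &iter);
              refl != NULL;
              refl = next_refl(refl, iter) )
        {
                signed int h, k, l;

                /* Too small a partiality -> not used for scaling */
                if ( get_partiality(refl) < MIN_PART_SCALE ) continue;

                /* Must be present in the reference list; missing
                 * reflections are now skipped silently instead of
                 * triggering an ERROR */
                get_indices(refl, &h, &k, &l);
                if ( find_refl(reference, h, k, l) == NULL ) continue;

                n++;
        }

        return n;
}
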
diff --git a/src/partialator.c b/src/partialator.c
index 3137a0c2..169e5f67 100644
--- a/src/partialator.c
+++ b/src/partialator.c
@@ -168,135 +168,6 @@ static void refine_all(Crystal **crystals, int n_crystals,
}
-/* Decide which reflections can be scaled */
-static int select_scalable_reflections(RefList *list, RefList *reference)
-{
- Reflection *refl;
- RefListIterator *iter;
- int n_acc = 0;
- int n_red = 0;
- int n_par = 0;
- int n_ref = 0;
-
- for ( refl = first_refl(list, &iter);
- refl != NULL;
- refl = next_refl(refl, iter) ) {
-
- int sc = 1;
-
- /* This means the reflection was not found on the last check */
- if ( get_redundancy(refl) == 0 ) {
- sc = 0;
- n_red++;
- }
-
- /* Don't try to scale up reflections which are hardly there */
- if ( get_partiality(refl) < 0.05 ) {
- sc = 0;
- n_par++;
- }
-
- /* If we are scaling against a reference set, we additionally
- * require that this reflection is in the reference list. */
- if ( reference != NULL ) {
- signed int h, k, l;
- get_indices(refl, &h, &k, &l);
- if ( find_refl(reference, h, k, l) == NULL ) {
- sc = 0;
- n_ref++;
- }
- }
-
- set_scalable(refl, sc);
-
- if ( sc ) n_acc++;
- }
-
- //STATUS("List %p: %i accepted, %i red zero, %i small part, %i no ref\n",
- // list, n_acc, n_red, n_par, n_ref);
-
- return n_acc;
-}
-
-
-static void select_reflections_for_refinement(Crystal **crystals, int n,
- RefList *full, int have_reference)
-{
- int i;
-
- for ( i=0; i<n; i++ ) {
-
- RefList *reflist;
- Reflection *refl;
- RefListIterator *iter;
- int n_acc = 0;
- int n_noscale = 0;
- int n_fewmatch = 0;
- int n_ref = 0;
- int n_weak = 0;
-
- reflist = crystal_get_reflections(crystals[i]);
- for ( refl = first_refl(reflist, &iter);
- refl != NULL;
- refl = next_refl(refl, iter) )
- {
- signed int h, k, l;
- int sc;
- double intensity, sigma;
- Reflection *f;
-
-
- n_ref++;
-
- intensity = get_intensity(refl);
- sigma = get_esd_intensity(refl);
- if ( intensity < 3.0*sigma ) {
- set_refinable(refl, 0);
- n_weak++;
- continue;
- }
-
- /* We require that the reflection itself is scalable
- * (i.e. sensible partiality and intensity) and that
- * the "full" estimate of this reflection is made from
- * at least two parts. */
- get_indices(refl, &h, &k, &l);
- sc = get_scalable(refl);
- if ( !sc ) {
- set_refinable(refl, 0);
- n_noscale++;
- continue;
- }
-
- f = find_refl(full, h, k, l);
- if ( f != NULL ) {
-
- int r = get_redundancy(f);
- if ( (r >= 2) || have_reference ) {
- set_refinable(refl, 1);
- n_acc++;
- } else {
- n_fewmatch++;
- }
-
- } else {
- ERROR("%3i %3i %3i is scalable, but is"
- " not in the reference list.\n",
- h, k, l);
- abort();
- }
-
- }
-
- //STATUS("Image %4i: %i guide reflections accepted "
- // "(%i not scalable, %i few matches, %i too weak, "
- // "%i total)\n",
- // i, n_acc, n_noscale, n_fewmatch, n_weak, n_ref);
-
- }
-}
-
-
static void display_progress(int n_images, int n_crystals)
{
if ( !isatty(STDERR_FILENO) ) return;
@@ -335,7 +206,6 @@ int main(int argc, char *argv[])
RefList *full;
int n_images = 0;
int n_crystals = 0;
- int nobs;
char *reference_file = NULL;
RefList *reference = NULL;
int have_reference = 0;
@@ -558,10 +428,6 @@ int main(int argc, char *argv[])
/* Image pointer will change due to later reallocs */
crystal_set_image(cr, NULL);
- /* Fill in initial estimates of stuff */
- crystal_set_osf(cr, 1.0);
- crystal_set_user_flag(cr, 0);
-
/* This is the raw list of reflections */
cr_refl = crystal_get_reflections(cr);
@@ -590,13 +456,11 @@ int main(int argc, char *argv[])
close_stream(st);
/* Fill in image pointers */
- nobs = 0;
for ( i=0; i<n_images; i++ ) {
int j;
for ( j=0; j<images[i].n_crystals; j++ ) {
Crystal *cryst;
- RefList *as;
int n_gained = 0;
int n_lost = 0;
double mean_p_change = 0.0;
@@ -608,12 +472,9 @@ int main(int argc, char *argv[])
update_partialities_2(cryst, pmodel, &n_gained, &n_lost,
&mean_p_change);
assert(n_gained == 0); /* That'd just be silly */
- as = crystal_get_reflections(cryst);
- nobs += select_scalable_reflections(as, reference);
}
}
- STATUS("%i scalable observations.\n", nobs);
/* Make initial estimates */
STATUS("Performing initial scaling.\n");
@@ -632,7 +493,7 @@ int main(int argc, char *argv[])
/* Iterate */
for ( i=0; i<n_iter; i++ ) {
-
+ int n_noscale = 0;
int n_noref = 0;
int n_solve = 0;
int n_lost = 0;
@@ -646,31 +507,28 @@ int main(int argc, char *argv[])
/* Refine the geometry of all patterns to get the best fit */
comp = (reference == NULL) ? full : reference;
- select_reflections_for_refinement(crystals, n_crystals,
- comp, have_reference);
refine_all(crystals, n_crystals, comp, nthreads, pmodel,
&srdata);
- nobs = 0;
for ( j=0; j<n_crystals; j++ ) {
int flag;
- Crystal *cr = crystals[j];
- RefList *rf = crystal_get_reflections(cr);
- flag = crystal_get_user_flag(cr);
+ flag = crystal_get_user_flag(crystals[j]);
if ( flag != 0 ) n_dud++;
if ( flag == 1 ) {
- n_noref++;
+ n_noscale++;
} else if ( flag == 2 ) {
- n_solve++;
+ n_noref++;
} else if ( flag == 3 ) {
+ n_solve++;
+ } else if ( flag == 4 ) {
n_lost++;
}
- nobs += select_scalable_reflections(rf, reference);
}
if ( n_dud ) {
STATUS("%i crystals could not be refined this cycle.\n",
n_dud);
+ STATUS("%i scaling failed.\n", n_noscale);
STATUS("%i not enough reflections.\n", n_noref);
STATUS("%i solve failed.\n", n_solve);
STATUS("%i lost too many reflections.\n", n_lost);
diff --git a/src/post-refinement.c b/src/post-refinement.c
index 26299f2c..1b950d32 100644
--- a/src/post-refinement.c
+++ b/src/post-refinement.c
@@ -48,6 +48,9 @@
#include "cell-utils.h"
+/* Minimum partiality of a reflection for it to be used for refinement */
+#define MIN_PART_REFINE (0.1)
+
/* Maximum number of iterations of NLSq to do for each image per macrocycle. */
#define MAX_CYCLES (10)
@@ -498,7 +501,8 @@ static double pr_iterate(Crystal *cr, const RefList *full,
Reflection *match;
double gradients[NUM_PARAMS];
- if ( !get_refinable(refl) ) continue;
+ if ( (get_intensity(refl) < 3.0*get_esd_intensity(refl))
+ || (get_partiality(refl) < MIN_PART_REFINE) ) continue;
/* Find the full version */
get_indices(refl, &ha, &ka, &la);
@@ -565,7 +569,7 @@ static double pr_iterate(Crystal *cr, const RefList *full,
//STATUS("%i reflections went into the equations.\n", nref);
if ( nref == 0 ) {
- crystal_set_user_flag(cr, 1);
+ crystal_set_user_flag(cr, 2);
gsl_matrix_free(M);
gsl_vector_free(v);
return 0.0;
@@ -581,10 +585,9 @@ static double pr_iterate(Crystal *cr, const RefList *full,
//STATUS("Shift %i: %e\n", param, shift);
if ( fabs(shift) > max_shift ) max_shift = fabs(shift);
}
- crystal_set_user_flag(cr, 0);
} else {
- crystal_set_user_flag(cr, 2);
+ crystal_set_user_flag(cr, 4);
}
gsl_matrix_free(M);
@@ -612,7 +615,8 @@ static double guide_dev(Crystal *cr, const RefList *full)
Reflection *full_version;
double I_full, I_partial;
- if ( !get_refinable(refl) ) continue;
+ if ( (get_intensity(refl) < 3.0*get_esd_intensity(refl))
+ || (get_partiality(refl) < MIN_PART_REFINE) ) continue;
get_indices(refl, &h, &k, &l);
assert((h!=0) || (k!=0) || (l!=0));
@@ -696,6 +700,11 @@ struct prdata pr_refine(Crystal *cr, const RefList *full,
struct prdata prdata;
double mean_p_change = 0.0;
+ prdata.n_filtered = 0;
+
+ /* Don't refine crystal if scaling was bad */
+ if ( crystal_get_user_flag(cr) != 0 ) return prdata;
+
if ( verbose ) {
dev = guide_dev(cr, full);
STATUS("\n"); /* Deal with progress bar */
diff --git a/src/scaling-report.c b/src/scaling-report.c
index 85ed0ad2..351d2dbd 100644
--- a/src/scaling-report.c
+++ b/src/scaling-report.c
@@ -249,8 +249,6 @@ static void partiality_graph(cairo_t *cr, Crystal **crystals, int n,
Reflection *f;
int bin;
- if ( !get_scalable(refl) ) continue;
-
get_indices(refl, &h, &k, &l);
f = find_refl(full, h, k, l);
if ( f == NULL ) continue;
@@ -364,8 +362,6 @@ static void partiality_histogram(cairo_t *cr, Crystal **crystals, int n,
signed int h, k, l;
Reflection *f;
- if ( !get_scalable(refl) ) continue;
-
get_indices(refl, &h, &k, &l);
f = find_refl(full, h, k, l);
if ( f == NULL ) continue;
@@ -604,8 +600,6 @@ static void intensity_histogram(cairo_t *cr, Crystal **crystals, int n,
{
double Iobs, pcalc, Ifull_est;
- if ( !get_scalable(f) ) continue;
-
pcalc = get_partiality(f);
Iobs = get_intensity(f);
Ifull_est = Iobs / (pcalc * osf);
@@ -639,8 +633,6 @@ static void intensity_histogram(cairo_t *cr, Crystal **crystals, int n,
{
double Iobs, pcalc, Ifull_est;
- if ( !get_scalable(f) ) continue;
-
pcalc = get_partiality(f);
Iobs = get_intensity(f);
Ifull_est = Iobs / (pcalc * osf);