author    Thomas White <taw@physics.org>  2011-07-06 15:53:31 +0200
committer Thomas White <taw@physics.org>  2012-02-22 15:27:32 +0100
commit    7a7cf0355140357aafc48cbcf79b3b0dc1e574cd (patch)
tree      3ddeb582840c2cc5f136c92cde144a32c5e90aef /src
parent    d5337c0c6494e388b991141856bcdaaebe2802f3 (diff)
Fix the choice of "guiding" reflections for PR
Diffstat (limited to 'src')
-rw-r--r--  src/geometry.c        |   2
-rw-r--r--  src/geometry.h        |   2
-rw-r--r--  src/hrs-scaling.c     | 183
-rw-r--r--  src/hrs-scaling.h     |   6
-rw-r--r--  src/partialator.c     | 167
-rw-r--r--  src/post-refinement.c |  36
-rw-r--r--  src/post-refinement.h |   3
7 files changed, 225 insertions(+), 174 deletions(-)
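In outline: the scaling and post-refinement paths move from ReflItemList bookkeeping to RefList, and the symmetry argument is dropped from functions that now receive lists already reduced to asymmetric indices (see asymmetric_indices() in partialator.c below). The public signatures change as follows, collected from the headers in this diff:

/* Before */
extern RefList *scale_intensities(struct image *images, int n,
                                  const char *sym, ReflItemList *obs,
                                  char *cref, double *reference);
extern void pr_refine(struct image *image, const RefList *full,
                      const char *sym);
extern void update_partialities(struct image *image, const char *sym);

/* After */
extern RefList *scale_intensities(struct image *images, int n,
                                  RefList *reference);
extern void pr_refine(struct image *image, const RefList *full);
extern void update_partialities(struct image *image);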
diff --git a/src/geometry.c b/src/geometry.c
index df1e8ac4..b338da56 100644
--- a/src/geometry.c
+++ b/src/geometry.c
@@ -347,7 +347,7 @@ void predict_corresponding_reflections(struct image *image, const char *sym,
/* Calculate partialities and apply them to the image's raw_reflections */
-void update_partialities(struct image *image, const char *sym)
+void update_partialities(struct image *image)
{
Reflection *refl;
RefListIterator *iter;
diff --git a/src/geometry.h b/src/geometry.h
index efda6c87..c47f4a27 100644
--- a/src/geometry.h
+++ b/src/geometry.h
@@ -25,7 +25,7 @@ extern void predict_corresponding_reflections(struct image *image,
const char *sym, int *n_expected,
int *n_found, int *n_notfound);
-extern void update_partialities(struct image *image, const char *sym);
+extern void update_partialities(struct image *image);
#endif /* GEOMETRY_H */
diff --git a/src/hrs-scaling.c b/src/hrs-scaling.c
index 2e2f8d99..ff73ee77 100644
--- a/src/hrs-scaling.c
+++ b/src/hrs-scaling.c
@@ -35,7 +35,7 @@
#define MAX_CYCLES (50)
-char *find_common_reflections(struct image *images, int n)
+static char *find_common_reflections(struct image *images, int n)
{
int i;
char *cref;
@@ -54,6 +54,8 @@ char *find_common_reflections(struct image *images, int n)
signed int h, k, l;
int j;
+ if ( !get_scalable(refl) ) continue;
+
get_indices(refl, &h, &k, &l);
for ( j=0; j<i; j++ ) {
@@ -84,8 +86,8 @@ static void s_uhavha(signed int hat, signed int kat, signed int lat,
for ( refl = find_refl(reflections, hat, kat, lat);
refl != NULL;
- refl = next_found_refl(refl) ) {
-
+ refl = next_found_refl(refl) )
+ {
double ic, sigi;
if ( !get_scalable(refl) ) continue;
@@ -95,7 +97,6 @@ static void s_uhavha(signed int hat, signed int kat, signed int lat,
uha_val += 1.0 / pow(sigi, 2.0);
vha_val += ic / pow(sigi, 2.0);
-
}
*uha = uha_val;
@@ -104,7 +105,7 @@ static void s_uhavha(signed int hat, signed int kat, signed int lat,
static void s_uhvh(struct image *images, int n,
- signed int h, signed int k, signed int l, const char *sym,
+ signed int h, signed int k, signed int l,
double *uhp, double *vhp)
{
int a;
@@ -212,51 +213,52 @@ static gsl_vector *solve_diagonal(gsl_vector *v, gsl_matrix *M)
}
-static double iterate_scale(struct image *images, int n,
- ReflItemList *obs, const char *sym, char *cref,
- double *reference)
+static double iterate_scale(struct image *images, int n, RefList *scalable,
+ char *cref, RefList *reference)
{
gsl_matrix *M;
gsl_vector *v;
gsl_vector *shifts;
double max_shift;
- int n_ref;
double *uh_arr;
double *vh_arr;
double *uha_arr;
double *vha_arr;
- int h; /* Reflection index */
int frame;
- int refidx;
+ Reflection *refl;
+ RefListIterator *iter;
M = gsl_matrix_calloc(n, n);
v = gsl_vector_calloc(n);
- n_ref = num_items(obs);
uh_arr = new_list_intensity();
vh_arr = new_list_intensity();
- for ( h=0; h<n_ref; h++ ) {
-
+ for ( refl = first_refl(scalable, &iter);
+ refl != NULL;
+ refl = next_refl(refl, iter) )
+ {
double uh, vh;
- struct refl_item *it = get_item(obs, h);
+ signed int h, k, l;
- s_uhvh(images, n, it->h, it->k, it->l, sym, &uh, &vh);
+ get_indices(refl, &h, &k, &l);
- set_intensity(uh_arr, it->h, it->k, it->l, uh);
- set_intensity(vh_arr, it->h, it->k, it->l, vh);
+ s_uhvh(images, n, h, k, l, &uh, &vh);
+ set_intensity(uh_arr, h, k, l, uh);
+ set_intensity(vh_arr, h, k, l, vh);
}
uha_arr = malloc(n*sizeof(double));
vha_arr = malloc(n*sizeof(double));
- for ( refidx=0; refidx<n_ref; refidx++ ) {
-
+ for ( refl = first_refl(scalable, &iter);
+ refl != NULL;
+ refl = next_refl(refl, iter) )
+ {
int a;
- struct refl_item *it = get_item(obs, refidx);
- const signed int h = it->h;
- const signed int k = it->k;
- const signed int l = it->l;
+ signed int h, k, l;
+
+ get_indices(refl, &h, &k, &l);
/* For this reflection, calculate all the possible
* values of uha and vha */
@@ -290,7 +292,15 @@ static double iterate_scale(struct image *images, int n,
if ( isnan(Ih) ) Ih = 0.0;
} else {
/* Look up by asymmetric indices */
- Ih = lookup_intensity(reference, h, k, l);
+ Reflection *r = find_refl(reference, h, k, l);
+ if ( r == NULL ) {
+ ERROR("%3i %3i %3i isn't in the "
+ "reference list, so why is it "
+ "marked as scalable?\n", h, k, l);
+ Ih = 0.0;
+ } else {
+ Ih = get_intensity(r);
+ }
}
rha = vha - image_a->osf * uha * Ih;
@@ -310,7 +320,9 @@ static double iterate_scale(struct image *images, int n,
if ( (reference != NULL) && (a!=b) ) continue;
/* Zero if no common reflections */
- if ( cref[a+n*b] != 0 ) continue;
+ if ( (reference == NULL) && cref[a+n*b] != 0 ) {
+ continue;
+ }
uhb = uha_arr[b];
vhb = vha_arr[b];
@@ -335,7 +347,6 @@ static double iterate_scale(struct image *images, int n,
gsl_vector_set(v, a, vval+vc);
}
-
}
free(uh_arr);
@@ -379,27 +390,28 @@ static double iterate_scale(struct image *images, int n,
}
-static RefList *lsq_intensities(struct image *images, int n,
- ReflItemList *obs, const char *sym)
+static void lsq_intensities(struct image *images, int n, RefList *full)
{
- RefList *full;
- int i;
-
- full = reflist_new();
- for ( i=0; i<num_items(obs); i++ ) {
+ Reflection *refl;
+ RefListIterator *iter;
- struct refl_item *it = get_item(obs, i);
+ for ( refl = first_refl(full, &iter);
+ refl != NULL;
+ refl = next_refl(refl, iter) )
+ {
double num = 0.0;
double den = 0.0;
int m;
int redundancy = 0;
- Reflection *new;
+ signed int h, k, l;
+
+ get_indices(refl, &h, &k, &l);
/* For each frame */
for ( m=0; m<n; m++ ) {
double G;
- Reflection *refl;
+ Reflection *f;
/* Don't scale intensities from this image if
* post refinement failed on the last step. */
@@ -407,18 +419,16 @@ static RefList *lsq_intensities(struct image *images, int n,
G = images[m].osf;
- for ( refl = find_refl(images[m].reflections,
- it->h, it->k, it->l);
- refl != NULL;
- refl = next_found_refl(refl) )
+ for ( f = find_refl(images[m].reflections, h, k, l);
+ f != NULL; f = next_found_refl(f) )
{
double p;
- if ( !get_scalable(refl) ) continue;
- p = get_partiality(refl);
+ if ( !get_scalable(f) ) continue;
+ p = get_partiality(f);
- num += get_intensity(refl) * p * G;
+ num += get_intensity(f) * p * G;
den += pow(p, 2.0) * pow(G, 2.0);
redundancy++;
@@ -426,19 +436,17 @@ static RefList *lsq_intensities(struct image *images, int n,
}
- if ( !isnan(num/den) ) {
- new = add_refl(full, it->h, it->k, it->l);
- set_int(new, num/den);
- set_redundancy(new, redundancy);
- } else {
- ERROR("Couldn't calculate LSQ full intensity for"
- "%3i %3i %3i\n", it->h, it->k, it->l);
- /* Doom is probably impending */
+ set_int(refl, num/den);
+
+ //STATUS("%3i %3i %3i %i %i\n", h, k, l,
+ // redundancy, get_redundancy(refl));
+
+ if ( get_redundancy(refl) != redundancy ) {
+ ERROR("Didn't find all the expected parts for"
+ " %3i %3i %3i\n", h, k, l);
}
}
-
- return full;
}
@@ -459,23 +467,78 @@ static void normalise_osfs(struct image *images, int n)
}
+static RefList *guess_scaled_reflections(struct image *images, int n)
+{
+ RefList *scalable;
+ int i;
+
+ scalable = reflist_new();
+ for ( i=0; i<n; i++ ) {
+
+ Reflection *refl;
+ RefListIterator *iter;
+
+ /* Don't scale intensities from this image if
+ * post refinement failed on the last step. */
+ if ( images[i].pr_dud ) continue;
+
+ for ( refl = first_refl(images[i].reflections, &iter);
+ refl != NULL;
+ refl = next_refl(refl, iter) )
+ {
+ if ( get_scalable(refl) ) {
+
+ signed int h, k, l;
+ Reflection *f;
+ int red;
+
+ get_indices(refl, &h, &k, &l);
+ f = find_refl(scalable, h, k, l);
+ if ( f == NULL ) {
+ f = add_refl(scalable, h, k, l);
+ }
+
+ red = get_redundancy(f);
+ set_redundancy(f, red+1);
+
+ }
+ }
+
+ }
+
+ return scalable;
+}
+
+
+static void show_scale_factors(struct image *images, int n)
+{
+ int i;
+ for ( i=0; i<n; i++ ) {
+ STATUS("Image %4i: scale factor %5.2f\n", i, images[i].osf);
+ }
+}
+
+
/* Scale the stack of images */
-RefList *scale_intensities(struct image *images, int n, const char *sym,
- ReflItemList *obs, char *cref, double *reference)
+RefList *scale_intensities(struct image *images, int n, RefList *reference)
{
int m;
RefList *full;
int i;
double max_shift;
+ char *cref;
/* Start with all scale factors equal */
for ( m=0; m<n; m++ ) images[m].osf = 1.0;
+ cref = find_common_reflections(images, n);
+ full = guess_scaled_reflections(images, n);
+
/* Iterate */
i = 0;
do {
- max_shift = iterate_scale(images, n, obs, sym, cref, reference);
+ max_shift = iterate_scale(images, n, full, cref, reference);
STATUS("Scaling iteration %2i: max shift = %5.2f\n",
i+1, max_shift);
i++;
@@ -483,6 +546,10 @@ RefList *scale_intensities(struct image *images, int n, const char *sym,
} while ( (max_shift > 0.01) && (i < MAX_CYCLES) );
- full = lsq_intensities(images, n, obs, sym);
+ free(cref);
+
+ show_scale_factors(images, n);
+
+ lsq_intensities(images, n, full);
return full;
}
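A side effect of the hrs-scaling.c changes is that the redundancy field now does double duty: guess_scaled_reflections() counts the scalable measurements per unique reflection up front, and lsq_intensities() later checks that it merged the same number of parts. A minimal sketch of the counting pattern, assuming CrystFEL's RefList accessors exactly as used in this patch (the helper name is hypothetical):

/* Count one scalable observation of (h,k,l) into 'unique', creating the
 * unique reflection on first sight; mirrors the inner loop of
 * guess_scaled_reflections() above. */
static void count_observation(RefList *unique,
                              signed int h, signed int k, signed int l)
{
	Reflection *f = find_refl(unique, h, k, l);
	if ( f == NULL ) f = add_refl(unique, h, k, l);
	set_redundancy(f, get_redundancy(f)+1);
}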
diff --git a/src/hrs-scaling.h b/src/hrs-scaling.h
index f65b7988..a18d3a0b 100644
--- a/src/hrs-scaling.h
+++ b/src/hrs-scaling.h
@@ -20,10 +20,8 @@
#include "image.h"
-extern RefList *scale_intensities(struct image *images, int n, const char *sym,
- ReflItemList *obs, char *cref,
- double *reference);
+extern RefList *scale_intensities(struct image *images, int n,
+ RefList *reference);
-extern char *find_common_reflections(struct image *images, int n);
#endif /* HRS_SCALING_H */
diff --git a/src/partialator.c b/src/partialator.c
index 9e09b4c7..252d6cfd 100644
--- a/src/partialator.c
+++ b/src/partialator.c
@@ -66,8 +66,6 @@ static void show_help(const char *s)
struct refine_args
{
- const char *sym;
- ReflItemList *obs;
RefList *full;
struct image *image;
FILE *graph;
@@ -91,7 +89,7 @@ static void refine_image(void *task, int id)
struct image *image = pargs->image;
image->id = id;
- pr_refine(image, pargs->full, pargs->sym);
+ pr_refine(image, pargs->full);
}
@@ -123,15 +121,13 @@ static void done_image(void *vqargs, void *task)
static void refine_all(struct image *images, int n_total_patterns,
- struct detector *det, const char *sym,
- ReflItemList *obs, RefList *full, int nthreads,
+ struct detector *det,
+ RefList *full, int nthreads,
FILE *graph, FILE *pgraph)
{
struct refine_args task_defaults;
struct queue_args qargs;
- task_defaults.sym = sym;
- task_defaults.obs = obs;
task_defaults.full = full;
task_defaults.image = NULL;
task_defaults.graph = graph;
@@ -152,7 +148,7 @@ static void refine_all(struct image *images, int n_total_patterns,
/* Decide which reflections can be scaled */
-static int select_scalable_reflections(RefList *list, ReflItemList *sc_l)
+static int select_scalable_reflections(RefList *list, RefList *reference)
{
Reflection *refl;
RefListIterator *iter;
@@ -162,29 +158,24 @@ static int select_scalable_reflections(RefList *list, ReflItemList *sc_l)
refl != NULL;
refl = next_refl(refl, iter) ) {
- int scalable = 1;
+ int sc = 1;
double v;
- if ( get_partiality(refl) < 0.1 ) scalable = 0;
+ if ( get_partiality(refl) < 0.1 ) sc = 0;
v = fabs(get_intensity(refl));
- if ( v < 0.1 ) scalable = 0;
- set_scalable(refl, scalable);
-
- if ( scalable ) {
+ if ( v < 0.1 ) sc = 0;
+ /* If we are scaling against a reference set, we additionally
+ * require that this reflection is in the reference list. */
+ if ( reference != NULL ) {
signed int h, k, l;
-
- nobs++;
-
- /* Add (asymmetric) indices to list */
get_indices(refl, &h, &k, &l);
-
- if ( !find_item(sc_l, h, k, l) ) {
- add_item(sc_l, h, k, l);
- }
-
+ if ( find_refl(reference, h, k, l) == NULL ) sc = 0;
}
+ set_scalable(refl, sc);
+
+ if ( sc ) nobs++;
}
return nobs;
@@ -192,7 +183,7 @@ static int select_scalable_reflections(RefList *list, ReflItemList *sc_l)
static void select_reflections_for_refinement(struct image *images, int n,
- ReflItemList *scalable)
+ RefList *full, int have_reference)
{
int i;
@@ -200,6 +191,11 @@ static void select_reflections_for_refinement(struct image *images, int n,
Reflection *refl;
RefListIterator *iter;
+ int n_acc = 0;
+ int n_nomatch = 0;
+ int n_noscale = 0;
+ int n_fewmatch = 0;
+ int n_ref = 0;
for ( refl = first_refl(images[i].reflections, &iter);
refl != NULL;
@@ -208,14 +204,47 @@ static void select_reflections_for_refinement(struct image *images, int n,
signed int h, k, l;
int sc;
+ n_ref++;
+
+ /* We require that the reflection itself is scalable
+ * (i.e. sensible partiality and intensity) and that
+ * the "full" estimate of this reflection is made from
+ * at least two parts. */
get_indices(refl, &h, &k, &l);
sc = get_scalable(refl);
+ if ( !sc ) {
+
+ n_noscale++;
+
+ } else {
+
+ Reflection *f = find_refl(full, h, k, l);
+
+ if ( f != NULL ) {
+
+ int r = get_redundancy(f);
+ if ( (r >= 2) || have_reference ) {
+ set_refinable(refl, 1);
+ n_acc++;
+ } else {
+ n_fewmatch++;
+ }
+
+ } else {
+ n_nomatch++;
+ }
- if ( sc && find_item(scalable, h, k, l) ) {
- set_refinable(refl, 1);
}
}
+ STATUS("Image %4i: %i guide reflections accepted "
+ "(%i not scalable, %i few matches, %i total)\n",
+ i, n_acc, n_noscale, n_fewmatch, n_ref);
+
+ /* This would be a silly situation, since there must be a match
+ * if THIS pattern has a scalable part of the reflection! */
+ assert(n_nomatch == 0);
+
}
}
@@ -230,7 +259,6 @@ int main(int argc, char *argv[])
FILE *fh;
int nthreads = 1;
struct detector *det;
- ReflItemList *scalable;
int i;
int n_total_patterns;
struct image *images;
@@ -240,13 +268,12 @@ int main(int argc, char *argv[])
int n_found = 0;
int n_expected = 0;
int n_notfound = 0;
- char *cref;
int n_usable_patterns = 0;
int nobs;
char *reference_file = NULL;
- double *reference = NULL;
- RefList *reference_list = NULL;
+ RefList *reference = NULL;
int n_dud;
+ int have_reference = 0;
/* Long options */
const struct option longopts[] = {
@@ -359,9 +386,9 @@ int main(int argc, char *argv[])
list = read_reflections(reference_file);
free(reference_file);
if ( list == NULL ) return 1;
- reference_list = asymmetric_indices(list, sym);
+ reference = asymmetric_indices(list, sym);
reflist_free(list);
- reference = intensities_from_list(reference_list);
+ have_reference = 1;
}
@@ -382,7 +409,6 @@ int main(int argc, char *argv[])
/* Fill in what we know about the images so far */
rewind(fh);
- scalable = new_items();
nobs = 0;
for ( i=0; i<n_total_patterns; i++ ) {
@@ -427,9 +453,10 @@ int main(int argc, char *argv[])
cur->reflections = as;
predict_corresponding_reflections(cur, sym, &n_expected,
- &n_found, &n_notfound);
+ &n_found, &n_notfound);
- nobs += select_scalable_reflections(cur->reflections, scalable);
+ nobs += select_scalable_reflections(cur->reflections,
+ reference);
progress_bar(i, n_total_patterns-1, "Loading pattern data");
n_usable_patterns++;
@@ -438,51 +465,13 @@ int main(int argc, char *argv[])
fclose(fh);
STATUS("Found %5.2f%% of the expected peaks (missed %i of %i).\n",
100.0 * (double)n_found / n_expected, n_notfound, n_expected);
- STATUS("Mean measurements per scalable unique reflection: %5.2f\n",
- (double)nobs / num_items(scalable));
-
- cref = find_common_reflections(images, n_usable_patterns);
/* Make initial estimates */
STATUS("Performing initial scaling.\n");
- full = scale_intensities(images, n_usable_patterns, sym,
- scalable, cref, reference);
-
- select_reflections_for_refinement(images, n_usable_patterns, scalable);
-
- for ( i=0; i<num_items(scalable); i++ ) {
- Reflection *f;
- struct refl_item *it = get_item(scalable, i);
- f = find_refl(full, it->h, it->k, it->l);
- if ( f == NULL ) {
- ERROR("%3i %3i %3i was designated scalable, but no"
- " full intensity was recorded.\n",
- it->h, it->k, it->l);
- }
- }
+ full = scale_intensities(images, n_usable_patterns, reference);
- for ( i=0; i<n_usable_patterns; i++ ) {
-
- Reflection *refl;
- RefListIterator *iter;
-
- for ( refl = first_refl(images[i].reflections, &iter);
- refl != NULL;
- refl = next_refl(refl, iter) )
- {
- signed int h, k, l;
-
- if ( !get_scalable(refl) ) continue;
- get_indices(refl, &h, &k, &l);
-
- if ( find_item(scalable, h, k, l) == 0 ) {
- ERROR("%3i %3i %3i in image %i is scalable"
- " but is not in the list of scalable"
- " reflections.\n", h, k, l, i);
- }
- }
-
- }
+ select_reflections_for_refinement(images, n_usable_patterns, full,
+ have_reference);
/* Iterate */
for ( i=0; i<n_iter; i++ ) {
@@ -491,6 +480,7 @@ int main(int argc, char *argv[])
FILE *fhp;
char filename[1024];
int j;
+ RefList *comp;
STATUS("Post refinement cycle %i of %i\n", i+1, n_iter);
@@ -508,14 +498,17 @@ int main(int argc, char *argv[])
/* Nothing will be written later */
}
- if ( reference == NULL ) reference_list = full;
+ if ( reference == NULL ) {
+ comp = full;
+ } else {
+ comp = reference;
+ }
/* Refine the geometry of all patterns to get the best fit */
- refine_all(images, n_usable_patterns, det, sym, scalable,
- reference_list, nthreads, fhg, fhp);
+ refine_all(images, n_usable_patterns, det,
+ comp, nthreads, fhg, fhp);
nobs = 0;
- clear_items(scalable);
for ( j=0; j<n_usable_patterns; j++ ) {
struct image *cur = &images[j];
@@ -525,16 +518,17 @@ int main(int argc, char *argv[])
&n_notfound);
nobs += select_scalable_reflections(cur->reflections,
- scalable);
+ reference);
}
- STATUS("Mean measurements per scalable unique "
- "reflection: %5.2f\n", (double)nobs/num_items(scalable));
/* Re-estimate all the full intensities */
reflist_free(full);
full = scale_intensities(images, n_usable_patterns,
- sym, scalable, cref, reference);
+ reference);
+
+ select_reflections_for_refinement(images, n_usable_patterns,
+ full, have_reference);
fclose(fhg);
fclose(fhp);
@@ -557,15 +551,12 @@ int main(int argc, char *argv[])
reflist_free(images[i].reflections);
}
reflist_free(full);
- delete_items(scalable);
free(sym);
free(outfile);
free_detector_geometry(det);
free(beam);
- free(cref);
if ( reference != NULL ) {
- free(reference);
- reflist_free(reference_list);
+ reflist_free(reference);
}
for ( i=0; i<n_usable_patterns; i++ ) {
cell_free(images[i].indexed_cell);
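The behavioural core of the partialator.c changes is the new acceptance rule for "guide" reflections: a measurement may steer post refinement only if it is itself scalable and its merged ("full") estimate rests on at least two measurements, unless an external reference set makes single-measurement estimates acceptable. Restated as a hypothetical predicate (the name is assumed; the patch inlines this logic in select_reflections_for_refinement()):

/* Sketch: may this measured reflection guide post refinement? */
static int is_guide_reflection(Reflection *refl, RefList *full,
                               int have_reference)
{
	signed int h, k, l;
	Reflection *f;

	if ( !get_scalable(refl) ) return 0;

	get_indices(refl, &h, &k, &l);
	f = find_refl(full, h, k, l);
	if ( f == NULL ) return 0;  /* see the assert above */

	return ( get_redundancy(f) >= 2 ) || have_reference;
}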
diff --git a/src/post-refinement.c b/src/post-refinement.c
index defac958..d8783c6b 100644
--- a/src/post-refinement.c
+++ b/src/post-refinement.c
@@ -316,8 +316,7 @@ static gsl_vector *solve_svd(gsl_vector *v, gsl_matrix *M)
/* Perform one cycle of post refinement on 'image' against 'full' */
-static double pr_iterate(struct image *image, const RefList *full,
- const char *sym)
+static double pr_iterate(struct image *image, const RefList *full)
{
gsl_matrix *M;
gsl_vector *v;
@@ -352,10 +351,11 @@ static double pr_iterate(struct image *image, const RefList *full,
/* Find the full version */
get_indices(refl, &ha, &ka, &la);
match = find_refl(full, ha, ka, la);
- if ( match == NULL ) continue;
- /* Some reflections may have recently become scalable, but
- * scale_intensities() might not yet have been called, so the
- * full version may not have been calculated yet. */
+ if ( match == NULL ) {
+ ERROR("%3i %3i %3i isn't in the reference list, so why "
+ " is it marked as refinable?\n", ha, ka, la);
+ continue;
+ }
I_full = image->osf * get_intensity(match);
/* Actual measurement of this reflection from this pattern? */
@@ -402,9 +402,9 @@ static double pr_iterate(struct image *image, const RefList *full,
}
//show_matrix_eqn(M, v, NUM_PARAMS);
- //STATUS("%i reflections were scalable\n", nref);
+ //STATUS("%i reflections went into the equations.\n", nref);
if ( nref == 0 ) {
- ERROR("No reflections left to scale!\n");
+ ERROR("No guide reflections to refine with!\n");
gsl_matrix_free(M);
gsl_vector_free(v);
return 0.0;
@@ -434,8 +434,7 @@ static double pr_iterate(struct image *image, const RefList *full,
}
-static double mean_partial_dev(struct image *image,
- const RefList *full, const char *sym)
+static double guide_dev(struct image *image, const RefList *full)
{
double dev = 0.0;
@@ -452,13 +451,11 @@ static double mean_partial_dev(struct image *image,
Reflection *full_version;
double I_full, I_partial;
- if ( !get_scalable(refl) ) continue;
+ if ( !get_refinable(refl) ) continue;
get_indices(refl, &h, &k, &l);
assert((h!=0) || (k!=0) || (l!=0));
- if ( !get_scalable(refl) ) continue;
-
full_version = find_refl(full, h, k, l);
if ( full_version == NULL ) continue;
/* Some reflections may have recently become scalable, but
@@ -481,14 +478,14 @@ static double mean_partial_dev(struct image *image,
}
-void pr_refine(struct image *image, const RefList *full, const char *sym)
+void pr_refine(struct image *image, const RefList *full)
{
double max_shift, dev;
int i;
const int verbose = 1;
if ( verbose ) {
- dev = mean_partial_dev(image, full, sym);
+ dev = guide_dev(image, full);
STATUS("\n"); /* Deal with progress bar */
STATUS("Before iteration: dev = %10.5e\n",
dev);
@@ -506,15 +503,14 @@ void pr_refine(struct image *image, const RefList *full, const char *sym)
cell_get_reciprocal(image->indexed_cell, &asx, &asy, &asz,
&bsx, &bsy, &bsz, &csx, &csy, &csz);
- max_shift = pr_iterate(image, full, sym);
+ max_shift = pr_iterate(image, full);
- update_partialities(image, sym);
+ update_partialities(image);
if ( verbose ) {
- dev = mean_partial_dev(image, full, sym);
+ dev = guide_dev(image, full);
STATUS("PR Iteration %2i: max shift = %10.2f"
- " dev = %10.5e\n",
- i+1, max_shift, dev);
+ " dev = %10.5e\n", i+1, max_shift, dev);
}
i++;
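guide_dev(), renamed from mean_partial_dev() and now keyed on the refinable flag, accumulates the disagreement between each guide measurement and its merged value. The accumulation itself falls outside the hunks shown, so the following is only a plausible sketch of the figure of merit (assumed least-squares form, with I_full scaled by the image OSF as in pr_iterate() above; requires <math.h> and CrystFEL's reflist.h/image.h):

/* Hypothetical restatement of guide_dev(); the exact expression for
 * 'dev' is an assumption. */
static double guide_dev_sketch(struct image *image, const RefList *full)
{
	double dev = 0.0;
	Reflection *refl;
	RefListIterator *iter;

	for ( refl = first_refl(image->reflections, &iter);
	      refl != NULL;
	      refl = next_refl(refl, iter) )
	{
		signed int h, k, l;
		Reflection *match;
		double I_full, p;

		if ( !get_refinable(refl) ) continue;

		get_indices(refl, &h, &k, &l);
		match = find_refl(full, h, k, l);
		if ( match == NULL ) continue;

		I_full = image->osf * get_intensity(match);
		p = get_partiality(refl);

		/* Assumed form: squared residual between the measured
		 * partial intensity and its predicted value. */
		dev += pow(get_intensity(refl) - p*I_full, 2.0);
	}

	return dev;
}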
diff --git a/src/post-refinement.h b/src/post-refinement.h
index 039a69c8..28d65877 100644
--- a/src/post-refinement.h
+++ b/src/post-refinement.h
@@ -41,8 +41,7 @@ enum {
};
-extern void pr_refine(struct image *image, const RefList *full,
- const char *sym);
+extern void pr_refine(struct image *image, const RefList *full);
/* Exported so it can be poked by tests/pr_gradient_check */
extern double gradient(struct image *image, int k, Reflection *refl, double r);
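Taken together, the refactored driver in partialator.c's main() now has this shape (condensed from the hunks above, with file handling and progress output omitted; variable names follow the patch):

/* Initial scaling and choice of guide reflections */
full = scale_intensities(images, n_usable_patterns, reference);
select_reflections_for_refinement(images, n_usable_patterns, full,
                                  have_reference);

/* Alternate post refinement with re-scaling */
for ( i=0; i<n_iter; i++ ) {

	int j;
	RefList *comp = (reference != NULL) ? reference : full;

	refine_all(images, n_usable_patterns, det, comp, nthreads,
	           fhg, fhp);

	for ( j=0; j<n_usable_patterns; j++ ) {
		struct image *cur = &images[j];
		predict_corresponding_reflections(cur, sym, &n_expected,
		                                  &n_found, &n_notfound);
		select_scalable_reflections(cur->reflections, reference);
	}

	reflist_free(full);
	full = scale_intensities(images, n_usable_patterns, reference);
	select_reflections_for_refinement(images, n_usable_patterns,
	                                  full, have_reference);
}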