/*
 * NOTE(review): this file appears to be a corrupted extraction of
 * CrystFEL's partialator.c (scaling and post-refinement tool).  Two
 * kinds of damage are visible:
 *
 *   1. The original layout has been collapsed onto a handful of very
 *      long physical lines.
 *   2. Every span between '<' and '>' has been stripped out: the
 *      system header names after "#include" are empty, the
 *      placeholders in the --help text (e.g. "--input=<file>") are
 *      gone, loop conditions such as "i<n" are truncated to "i", and
 *      with them large interior portions of main() -- the option
 *      parsing, the chunk-loading loop header and the iterative
 *      refinement loop header -- have been swallowed.  The file also
 *      ends mid-statement inside main().
 *
 * Because essential tokens are missing, this text cannot compile as-is
 * and should be restored from the CrystFEL upstream sources rather
 * than patched in place.  The code below is reproduced byte-for-byte;
 * only this comment has been added.  What remains readable:
 *
 *   - show_help(): prints usage text.  Note the help string prints
 *     "-g." where "-g," was presumably intended -- TODO fix upstream.
 *   - struct refine_args / struct queue_args: per-task and shared
 *     state for the thread pool used during refinement.
 *   - refine_image() / get_image() / done_image(): thread-pool
 *     callbacks (work / task-producer / completion).  get_image()
 *     hands out qargs->images[qargs->n] and increments n; done_image()
 *     counts completions and drives the progress bar.
 *   - refine_all(): runs pr_refine() over all images on a pool of
 *     worker threads, first clamping nthreads to n_total_patterns so
 *     no thread sits idle.
 *   - select_scalable_reflections(): marks a reflection scalable only
 *     if get_partiality() >= 0.1 and |intensity| >= 0.1, records each
 *     scalable (asymmetric) hkl once in sc_l, and returns the number
 *     of scalable observations.
 *   - select_reflections_for_refinement(): header visible; body lost
 *     to the stripped "i<...>" span.
 *   - Fragments of main(): reading chunks from the input stream
 *     (read_chunk), skipping unindexed patterns, seeding per-image
 *     estimates (divergence, bandwidth, osf = 1.0, profile_radius =
 *     0.0001e9), mapping reflections to asymmetric indices, initial
 *     scaling via scale_intensities(), and the re-scaling step of the
 *     iterative scaling / post-refinement cycle.
 */
/* * partialator.c * * Scaling and post refinement for coherent nanocrystallography * * (c) 2006-2011 Thomas White * * Part of CrystFEL - crystallography with a FEL * */ #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include "utils.h" #include "hdf5-file.h" #include "symmetry.h" #include "stream.h" #include "geometry.h" #include "peaks.h" #include "thread-pool.h" #include "beam-parameters.h" #include "post-refinement.h" #include "hrs-scaling.h" #include "reflist.h" #include "reflist-utils.h" static void show_help(const char *s) { printf("Syntax: %s [options]\n\n", s); printf( "Scaling and post refinement for coherent nanocrystallography.\n" "\n" " -h, --help Display this help message.\n" "\n" " -i, --input= Specify the name of the input 'stream'.\n" " (must be a file, not e.g. stdin)\n" " -o, --output= Output filename. Default: facetron.hkl.\n" " -g. --geometry= Get detector geometry from file.\n" " -b, --beam= Get beam parameters from file, which provides\n" " initial values for parameters, and nominal\n" " wavelengths if no per-shot value is found in \n" " an HDF5 file.\n" " -y, --symmetry= Merge according to symmetry .\n" " -n, --iterations= Run cycles of scaling and post-refinement.\n" " --reference= Refine images against reflections in ,\n" " instead of taking the mean of the intensity\n" " estimates.\n" "\n" " -j Run analyses in parallel.\n"); } struct refine_args { const char *sym; ReflItemList *obs; RefList *full; struct image *image; FILE *graph; FILE *pgraph; }; struct queue_args { int n; int n_done; int n_total_patterns; struct image *images; struct refine_args task_defaults; }; static void refine_image(void *task, int id) { struct refine_args *pargs = task; struct image *image = pargs->image; image->id = id; pr_refine(image, pargs->full, pargs->sym); } static void *get_image(void *vqargs) { struct refine_args *task; struct queue_args *qargs = vqargs; task = malloc(sizeof(struct
refine_args)); memcpy(task, &qargs->task_defaults, sizeof(struct refine_args)); task->image = &qargs->images[qargs->n]; qargs->n++; return task; } static void done_image(void *vqargs, void *task) { struct queue_args *qargs = vqargs; qargs->n_done++; progress_bar(qargs->n_done, qargs->n_total_patterns, "Refining"); free(task); } static void refine_all(struct image *images, int n_total_patterns, struct detector *det, const char *sym, ReflItemList *obs, RefList *full, int nthreads, FILE *graph, FILE *pgraph) { struct refine_args task_defaults; struct queue_args qargs; task_defaults.sym = sym; task_defaults.obs = obs; task_defaults.full = full; task_defaults.image = NULL; task_defaults.graph = graph; task_defaults.pgraph = pgraph; qargs.task_defaults = task_defaults; qargs.n = 0; qargs.n_done = 0; qargs.n_total_patterns = n_total_patterns; qargs.images = images; /* Don't have threads which are doing nothing */ if ( n_total_patterns < nthreads ) nthreads = n_total_patterns; run_threads(nthreads, refine_image, get_image, done_image, &qargs, n_total_patterns, 0, 0, 0); } /* Decide which reflections can be scaled */ static int select_scalable_reflections(RefList *list, ReflItemList *sc_l) { Reflection *refl; RefListIterator *iter; int nobs = 0; for ( refl = first_refl(list, &iter); refl != NULL; refl = next_refl(refl, iter) ) { int scalable = 1; double v; if ( get_partiality(refl) < 0.1 ) scalable = 0; v = fabs(get_intensity(refl)); if ( v < 0.1 ) scalable = 0; set_scalable(refl, scalable); if ( scalable ) { signed int h, k, l; nobs++; /* Add (asymmetric) indices to list */ get_indices(refl, &h, &k, &l); if ( !find_item(sc_l, h, k, l) ) { add_item(sc_l, h, k, l); } } } return nobs; } static void select_reflections_for_refinement(struct image *images, int n, ReflItemList *scalable) { int i; for ( i=0; idet = det; if ( read_chunk(fh, cur) != 0 ) { /* Should not happen, because we counted the patterns * earlier.
*/ ERROR("Failed to read chunk from the input stream.\n"); return 1; } /* Won't be needing this, if it exists */ image_feature_list_free(cur->features); cur->features = NULL; /* "n_usable_patterns" will not be incremented in this case */ if ( cur->indexed_cell == NULL ) continue; /* Fill in initial estimates of stuff */ cur->div = beam->divergence; cur->bw = beam->bandwidth; cur->width = det->max_fs; cur->height = det->max_ss; cur->osf = 1.0; cur->profile_radius = 0.0001e9; cur->pr_dud = 0; /* Muppet proofing */ cur->data = NULL; cur->flags = NULL; cur->beam = NULL; /* This is the raw list of reflections */ as = asymmetric_indices(cur->reflections, sym); reflist_free(cur->reflections); cur->reflections = as; predict_corresponding_reflections(cur, sym, &n_expected, &n_found, &n_notfound); nobs += select_scalable_reflections(cur->reflections, scalable); progress_bar(i, n_total_patterns-1, "Loading pattern data"); n_usable_patterns++; } fclose(fh); STATUS("Found %5.2f%% of the expected peaks (missed %i of %i).\n", 100.0 * (double)n_found / n_expected, n_notfound, n_expected); STATUS("Mean measurements per scalable unique reflection: %5.2f\n", (double)nobs / num_items(scalable)); cref = find_common_reflections(images, n_usable_patterns); /* Make initial estimates */ STATUS("Performing initial scaling.\n"); full = scale_intensities(images, n_usable_patterns, sym, scalable, cref, reference); select_reflections_for_refinement(images, n_usable_patterns, scalable); for ( i=0; ih, it->k, it->l); if ( f == NULL ) { ERROR("%3i %3i %3i was designated scalable, but no" " full intensity was recorded.\n", it->h, it->k, it->l); } } for ( i=0; ireflections, scalable); } STATUS("Mean measurements per scalable unique " "reflection: %5.2f\n", (double)nobs/num_items(scalable)); /* Re-estimate all the full intensities */ reflist_free(full); full = scale_intensities(images, n_usable_patterns, sym, scalable, cref, reference); fclose(fhg); fclose(fhp); } STATUS("Final scale factors:\n");
n_dud = 0; for ( i=0; i