Source code for galpak.lib.galpak3d

# coding=utf-8

import os
import re
import sys
import math
import logging

import numpy as np
from scipy import ndimage, interpolate
from matplotlib import pyplot as plot

# OPTIONAL IMPORTS
try:
    import bottleneck as bn
except ImportError:
    import numpy as bn
try:
    from astropy.io.fits import Header
except ImportError:
    from pyfits import Header
try:
    import asciitable
except ImportError:
    logging.error('Please install the python package `asciitable`.')

# LOCAL IMPORTS
import math_utils
from galaxy_parameters import GalaxyParameters, GalaxyParametersError
from instruments import *
from hyperspectral_cube import HyperspectralCube as Cube
from string_stdout import StringStdOut

# LOGGING CONFIGURATION
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('GalPaK')

# CONSTANTS
G = 6.67384e-11  # Gravitational Constant (m^3.kg^-1.s^-2)
SOL_MASS = 2e30  # Solar Mass (kg)
PARSEC = 3e16    # Parsec (m)


class GalPaK3D:
    """
    GalPaK3D is a tool to extract Galaxy Parameters and Kinematics from
    3-Dimensional data, using reverse deconvolution with Bayesian analysis
    Markov Chain Monte Carlo (random walk).

    cube: HyperspectralCube|string
        The actual data on which we'll work; it should contain only one galaxy.
        Can be a HyperspectralCube object, a string filename to a FITS file,
        or even MPDAF's ``mpdaf.obj.Cube``.
    redshift: float
        The redshift of the galaxy we're observing in the measure cube.
        This is used in the estimation of the mass of the galaxy.
        If this is not set, GalPaK will not try to compute the mass.
        It has no influence over the MCMC run, which will still try to guess
        the galaxy's z position.
    seeing: float
        Aka the Point Spread Function's Full Width at Half Maximum.
        This convenience parameter, when provided, will override the FWHM
        value of the instrument's PSF.
    line: None [default] | dict
        To fit doublets, use a dictionary with
        line['wave'] = [l, lref], e.g. [3726.2, 3728.9]  # observed or rest
        line['ratio'] = [0.8, 1.0], e.g. [0.8, 1.0]
        The primary line used for redshifts is the reddest one.
    instrument: Instrument
        The instrument configuration to use when simulating convolution.
        The default is :class:`MUSE <galpak.MUSE>`.
    crval3: float
        A value for the cube header's CRVAL3 card when it is missing.
        You should update your cube's header.
    crpix3: float
        A value for the cube header's CRPIX3 card when it is missing.
        You should update your cube's header.
    cunit3: float
        A value for the cube header's CUNIT3 card when it is missing.
        You should update your cube's header.
    cdelt3: float
        A value for the cube header's CDELT3 card when it is missing.
        You should update your cube's header.
    force_header_update: bool
        Set to True to force the update of the above header cards
        even when their values are not missing.
        Note: these will not be saved into the FITS file (if the cube is one).
    """

    def __init__(self, cube, variance=None, redshift=None, seeing=None,
                 line=None, instrument=None, verbose=True, flux_image=None,
                 crval3=None, crpix3=None, cunit3=None, cdelt3=None,
                 ctype3=None, cunit1=None, force_header_update=False):
        # DEVS : If you change the signature above,
        # remember to update the run() in api.py

        # Assign the logger to a property for convenience, and set verbosity
        self.logger = logger
        self._set_verbose(verbose)

        # Set up the input data cube
        if isinstance(cube, basestring):
            cube = Cube.from_file(cube)
            if cube.var is not None:
                self.logger.info('Found a variance extension')
                variance = Cube(data=cube.var)
        if not isinstance(cube, Cube):
            try:
                import mpdaf
                if isinstance(cube, mpdaf.obj.Cube):
                    if variance is None:
                        if cube.var is not None:
                            self.logger.info('Found a variance extension')
                            variance = Cube(data=cube.var)
                    cube = Cube.from_mpdaf(cube)
                else:
                    raise TypeError("Provided cube is not a HyperspectralCube "
                                    "nor mpdaf's Cube")
            except ImportError:
                raise TypeError("Provided cube is not a HyperspectralCube")
        if cube.is_empty():
            raise ValueError("Provided cube is empty")
        self.cube = cube

        if not self.cube.has_header():
            self.logger.info("Reading hyperspectral cube without header. "
                             "Creating a minimal one.")
            self.cube.header = Header()

        # GalPaK needs a sane Cube
        #self.cube.sanitize()

        # Require the cube to be of odd depth
        #if not (cube.shape[0] & 1):
        #    raise IOError("The input cube MUST have an odd depth.")

        # Set up the cube variance
        if variance is not None:
            if isinstance(variance, basestring):
                self.logger.info("Read provided variance cube %s into Cube."
                                 % variance)
                variance = Cube.from_file(variance)
            elif isinstance(variance, Cube):
                if variance.data is None:
                    self.logger.warning("Provided variance cube is empty.")
            elif not isinstance(variance, np.float):
                raise TypeError("Provided variance is not a Cube nor a float")
        self.variance = variance

        # Set up the measure context
        self.redshift = redshift
        self.line = line

        # Set up the instrument's context
        if instrument is None:
            instrument = MUSE()
            self.logger.warning('Using the MUSE instrument per default. '
                                'You should specify your own instrument.')
        if isinstance(instrument, basestring):
            raise ValueError("Instrument needs to be an instance of "
                             "Instrument, not a string.")
        if not isinstance(instrument, Instrument):
            raise ValueError("Instrument needs to be an instance of Instrument")
        self.instrument = instrument

        ## Set default cube specs from the instrument when missing from the
        # headers, as we don't want to rely on the input FITS having properly
        # set headers. So we aggregate in the cube our own specs (in " and µm)
        # for our personal use :
        # - xy_step
        # - z_step
        # - z_central

        # 1. Patch up the Cube's missing values
        try:
            self.cube.patch(
                crval3=crval3, crpix3=crpix3, cunit3=cunit3,
                cdelt3=cdelt3, ctype3=ctype3, cunit1=cunit1,
                force=force_header_update
            )
        except ValueError:
            raise ValueError("The cube already has one of the header cards "
                             "you're trying to provide. "
                             "Use the force_header_update= argument.")
        self.logger.debug('Header after patch : %s' % self.cube)

        # 2. Set cube pixel sizes (xy_step, z_step, z_central) from the
        #    instrument defaults if the header is incomplete
        self.cube.defaults_from_instrument(instrument=instrument)

        # 3.
        self.cube.initialize_self_cube()

        # 4. Calibrate the instrument with the cube
        self.instrument.use_pixelsize_from_cube(self.cube)
        self.logger.debug('z central : %4.e' % self.cube.z_central)

        # Override the PSF FWHM (aka. seeing) if provided
        if seeing is not None and self.instrument.psf is not None:
            try:
                self.instrument.psf.fwhm = seeing
            except AttributeError:
                raise IOError("You provided a seeing but your instrument's "
                              "PSF has no FWHM.")

        # Prepare output attributes
        self.acceptance_rate = 100.
        self.galaxy = GalaxyParameters()
        self.stdev = GalaxyParameters()
        self.chain = None
        self.sub_chain = None
        self.psf3d = None
        self.convolved_cube = None
        self.deconvolved_cube = None
        self.residuals_cube = None
        self.residuals_map = None
        self.variance_cube = None
        # true intrinsic maps
        self.true_flux_map = None
        self.true_velocity_map = None
        self.true_disp_map = None
        # observed maps
        self.obs_flux_map = None
        self.obs_velocity_map = None
        self.obs_disp_map = None
        self.dynamical_mass = None
        # run_mcmc() parameters
        self.max_iterations = None
        self.method = None
        self.chain_fraction = None
        self.percentile = None
        self.initial_parameters = None
        self.known_parameters = None
        self.random_scale = None
        #self.flux_profile = None
        #self.rotation_curve = None
        #self.disk_dispersion = None
        self.disk_model = None

        # Update the measure context, if provided
        if redshift is not None:
            self.redshift = redshift

        # Compute a flux estimation
        # fixme: add weighted sum with variance if present
        flux = bn.nansum(self.cube.data)
        if flux < 0:
            self.logger.warning(
                "WARNING: Initial flux (%4.2e) is <0 -- "
                "likely wrong, will recompute it ignoring <0 values" % flux
            )
            flux = np.sum(np.where(self.cube.data > 0, self.cube.data, 0))
            self.logger.warning('Initial flux is now %4.2e' % flux)
            self.logger.info('TIP: use initial_parameters to set the flux')
        else:
            self.logger.info('Initial flux is %4.2e' % flux)

        # Default initial parameters
        self.initial_parameters = GalaxyParameters(
            radius=3.0,
            turnover_radius=1.0,
            inclination=50,
            maximum_velocity=100.0,
            velocity_dispersion=5.0,
            flux=flux
        )

        # Default boundaries
        self.min_boundaries = GalaxyParameters(
            x=self.cube.shape[2] / 3.,
            y=self.cube.shape[1] / 3.,
            z=self.cube.shape[0] / 3.,
            flux=0.,
            radius=0.199,
            inclination=-0.5,
            pa=-180,
            turnover_radius=0.01,
            maximum_velocity=-350.,
            velocity_dispersion=0.
        )
        # changed max radius to 3/8th of the cube size.
        self.max_boundaries = GalaxyParameters(
            x=self.cube.shape[2] * 2 / 3.,
            y=self.cube.shape[1] * 2 / 3.,
            z=self.cube.shape[0] * 2 / 3.,
            flux=3. * flux,
            radius=0.5 * (self.cube.shape[1] + self.cube.shape[2]) * 3 / 8.,
            inclination=90.5,
            pa=180,
            turnover_radius=1. / self.cube.xy_step,
            maximum_velocity=350.,
            velocity_dispersion=180.
        )

        if self.variance is not None:
            if isinstance(variance, Cube):
                self.logger.info("Using provided variance : %s"
                                 % self.variance.filename)
                variance_cube = self.variance.data
                self.logger.info("Replacing 0s in the variance cube by 1e12")
                variance_cube = np.where(variance_cube == 0.0, 1e12,
                                         variance_cube)
            if isinstance(variance, float):
                self.logger.info("Using provided variance : %s"
                                 % self.variance)
                variance_cube = self.variance
            #variance_cube = np.where(np.isnan(cube_data), 1e12, variance_cube)
        else:
            # Clip the data, and collect the standard deviation sigma
            self.logger.warning("No variance provided. "
                                "Estimating variance from edge statistics")
            clipped_data, clip_sigma, __ = math_utils.median_clip(
                self.cube.data[:, 2:-4, 2:4], 2.5)  # y-band at x[2:4]
            self.logger.info("Computing variance from the edges: Var=%.f"
                             % clip_sigma)
            # Adjust the stdev margin if it is zero,
            # as we'll divide with it later on
            if not np.isfinite(clip_sigma):
                clipped_data, clip_sigma, __ = math_utils.median_clip(
                    self.cube.data, 2.5)
                self.logger.info("Recomputing variance from the whole cube: "
                                 "Var=%.e" % clip_sigma)
            if np.size(clip_sigma) == 1 and clip_sigma == 0:
                clip_sigma = 1e-20
            variance_cube = clip_sigma ** 2

        # Save the variance cube
        self.variance_cube = variance_cube             # cube of sigma^2
        self.error_cube = np.sqrt(self.variance_cube)  # cube of sigmas

        # Provide the user with some feedback
        self.logger.info("Setting up with the following setup : %s"
                         % self.instrument)
        #self.logger.info("""Will use hyperspectral cube attributes :
        #xy_step = %.3e "
        #z_step = %.3e %s
        #z_central = %.3e %s
        #""" % (self.cube.xy_step, self.cube.z_step, self.cube.z_cunit,
        #       self.cube.z_central, self.cube.z_cunit))

        # We're sometimes dividing by zero, on purpose (it's FASTER)
        # Uncomment this to annihilate console logs -- but it's dirty
        #previous_error_status = np.seterr(all='ignore')
        #np.seterr(**previous_error_status)
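    # Usage sketch (added for illustration; not part of the original module).
    # Assumes the package re-exports GalPaK3D and MUSE at top level, as the
    # imports above suggest; 'my_galaxy.fits' is a placeholder file name.
    #
    #   from galpak import GalPaK3D, MUSE
    #   gk = GalPaK3D('my_galaxy.fits', seeing=0.7, redshift=1.2,
    #                 instrument=MUSE())
    #   galaxy = gk.run_mcmc(max_iterations=15000)
    #   gk.save('my_run', clobber=True)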
    def run_mcmc(self, max_iterations=15000, method_chain='last',
                 last_chain_fraction=60, percentile=95,
                 flux_profile='exponential', hz_profile='gaussian',
                 flux_image=None, rotation_curve='arctan',
                 disk_dispersion='thick', chi_stat='gaussian',
                 redshift=None, min_boundaries=None, max_boundaries=None,
                 known_parameters=None, initial_parameters=None,
                 min_acceptance_rate=5.0, random_scale=None, verbose=True):
        """
        Main method of GalPaK: computes and returns the galaxy parameters
        as a :class:`GalaxyParameters <galpak.GalaxyParameters>` object,
        using reverse deconvolution with a MCMC.

        Also fills up the following attributes :
            - chain
            - psf3d
            - deconvolved_cube
            - convolved_cube
            - residuals_cube (Data-Model, in units of sigma)
            - residuals_map (average of Data-Model in units of sigma,
              i.e. 1/N_z . Sum_z residuals_cube . sqrt(N_z))
            - acceptance_rate
            - galaxy (same object as the returned value), with Vmax forced to
              be positive [and 180 added to PA when needed]
            - stdev (also available as galaxy.stdev)
            - true_flux_map
            - true_velocity_map
            - true_disp_map

        Stops iterating when the acceptance rate drops below
        ``min_acceptance_rate`` % or when ``max_iterations`` is reached.

        max_iterations: int
            Maximum number of useful iterations.
        method_chain: 'chi_sorted'|'chi_min'|'last'
            Method used to determine the best parameters from the chain.
                - 'last' (default) : mean of the last last_chain_fraction (%)
                  parameters of the chain
                - 'chi_sorted' : mean of the last_chain_fraction (%) best-fit
                  parameters of the chain
                - 'chi_min' : mean of the last_chain_fraction (%) of the chain
                  around the minimum chi
        last_chain_fraction: int
            Last chain fraction (in %) used to compute the best parameters.
            Defaults to 60.
        flux_profile: 'exponential' | 'gaussian' | 'de_vaucouleurs' | 'user'
            The flux profile of the observed galaxy. The default is
            'exponential'. See http://en.wikipedia.org/wiki/Sersic%27s_law
            For a 'user'-provided flux map, specify flux_image.
            Currently unsupported.
        hz_profile: 'gaussian' [default] | 'exponential' | 'sech2'
            The flux profile perpendicular to the disk.
            The default is 'gaussian'.
        flux_image: string
            Filename of a user-provided flux image (FITS) to be used.
            UNSUPPORTED.
        rotation_curve: 'arctan' | 'mass' | 'exp' | 'tanh' | 'isothermal'
            The profile of the velocity v(r) can be :
                - 'arctan' (default) : ~ Vmax arctan(r/rV),
                  rV = turnover radius
                - 'tanh' : Vmax tanh(r/rV), rV = turnover radius
                - 'exp' : inverted exponential, Vmax (1 - exp(-r/rV))
                - 'mass' : a constant light-to-mass ratio,
                  v(r) = sqrt(G m(<r) / r)
                - 'isothermal' : an isothermal rotation curve,
                  v(r) = Vmax sqrt(1 - rV/r arctan(r/rV))
        disk_dispersion: 'thick'|'thin'|'infinitely_thin'
            The local disk dispersion from the rotation curve and disk
            thickness, from Binney & Tremaine 2008; see Genzel et al. 2008.
            GalPaK has 3 components for the dispersion :
                - a component from the rotation curve, arising from mixing
                  velocities of a disk with non-zero thickness,
                - a component from the local disk dispersion, specified by
                  ``disk_dispersion``,
                - a spatially constant dispersion [which is the output
                  parameter ``velocity_dispersion``].
        chi_stat: 'gaussian' [default] | 'Mighell' | 'Neyman'
            The chi2 statistics
            https://heasarc.gsfc.nasa.gov/xanadu/xspec/manual/XSappendixStatistics.html
            Mighell : http://adsabs.harvard.edu/abs/1999ApJ...518..380M
            Humphrey 2009 : http://adsabs.harvard.edu/abs/2009ApJ...693..822H
                - 'gaussian' (default) : Sum (D - M)^2 / e
                - 'Neyman' : Sum (D - M)^2 / max(D, 1)
                - 'Mighell' : Sum (D + min(D, 1) - M)^2 / (D + 1)
        redshift: float
            The redshift of the observed galaxy, used in the mass calculus.
            Will override the redshift provided at init ; this is a
            convenience parameter.
            If this is not set anywhere, GalPaK will not try to compute the
            mass.
        min_boundaries: ndarray|GalaxyParameters
            The galaxy parameters will never be less than these values.
            Will override the default minimum boundaries for the parameters.
            If any of these values are NaN, they will be replaced by the
            default ones.
        max_boundaries: ndarray|GalaxyParameters
            The galaxy parameters will never be more than these values.
            Will override the default maximum boundaries for the parameters.
            If any of these values are NaN, they will be replaced by the
            default ones.
        known_parameters: ndarray|GalaxyParameters
            All set parameters in this array will be skipped in the MCMC ;
            the algorithm will not try to guess them.
        initial_parameters: ndarray|GalaxyParameters
            The initial galaxy parameters of the MCMC chain.
            If None, will use the following :
                galaxy.radius = 3.0
                galaxy.rv = 1.0
                galaxy.maximum_velocity = 0.0
                galaxy.velocity_dispersion = 5.0
                galaxy.flux = flux
            If any of these values are NaN, they will be replaced by the mean
            of the boundaries (default values).
        min_acceptance_rate: float
            In %, the acceptance rate is the number of useful iterations
            divided by the total number of iterations.
            If it gets below this threshold, iterations stop.
        random_scale: float
            Scales the amplitude of the MCMC sampling.
            This is an important parameter to adjust for a reasonable
            acceptance rate, which should be around 30-50% :
            if the acceptance rate is < 20-30% (too low), decrease
            random_scale ; if the acceptance rate is > 50-60% (too high),
            increase random_scale.
        verbose: boolean
            Set to True to output a detailed log of the process.
            The deconvolution is faster when this is set to False.
        """
        # DEVS : If you change the signature above,
        # remember to update the run() in api.py

        # Set verbosity
        self._set_verbose(verbose)

        # Save the parameters (the animation uses them)
        self.max_iterations = max_iterations
        self.method = method_chain
        self.chain_fraction = last_chain_fraction
        self.percentile = percentile
        # fixme: this is unused
        self.flux_map_user = flux_image
        #self.flux_profile = flux_profile
        #self.rotation_curve = rotation_curve
        #self.disk_dispersion = disk_dispersion
        #self.hz_profile = hz_profile
        self.disk_model = {}
        self.disk_model['flux_profile'] = flux_profile
        self.disk_model['hz_profile'] = hz_profile
        self.disk_model['rotation_curve'] = rotation_curve
        self.disk_model['disk_dispersion'] = disk_dispersion

        # Update the measure context, if provided
        if redshift is not None:
            self.redshift = redshift

        # Sanitize the data and set an arbitrary big stdev where NaNs are
        cube_data = self.cube.data
        cube_data = np.nan_to_num(cube_data)

        # Provide the user with some feedback
        self.logger.info("Starting deconvolution with the following "
                         "instrument properties : %s" % self.instrument)
        self.logger.info("Flux profile : %s", flux_profile)
        self.logger.info("Disk dispersion : %s", disk_dispersion)
        self.logger.info("Rotation curve : %s", rotation_curve)

        # Memoize various dimensions and shapes
        shape = np.shape(cube_data)
        dim_d = np.size(cube_data)

        # Set the boundaries of the parameters of the galaxy
        # eps is a fraction of the boundary ;
        # an arbitrarily small value for closeness to the boundaries
        eps = 0.01
        if min_boundaries is not None:
            min_boundaries = np.copy(min_boundaries)
            math_utils.merge_where_nan(min_boundaries, self.min_boundaries)
            self.min_boundaries = min_boundaries
        if max_boundaries is not None:
            max_boundaries = np.copy(max_boundaries)
            math_utils.merge_where_nan(max_boundaries, self.max_boundaries)
            self.max_boundaries = max_boundaries

        bug_boundaries = self.min_boundaries > self.max_boundaries
        if bug_boundaries.any():
            self.logger.debug("Min Boundaries : %s", self.min_boundaries)
            self.logger.debug("Max Boundaries : %s", self.max_boundaries)
            raise ValueError("Boundaries are WRONG, min > max")

        # Create initial galaxy parameters, using the mean and provided values
        mean_parameters = np.array((self.max_boundaries
                                    + self.min_boundaries) / 2.)
        # complete the default values with the mean parameters
        math_utils.merge_where_nan(self.initial_parameters, mean_parameters)
        if initial_parameters is not None:
            # complete the input with the default values
            math_utils.merge_where_nan(initial_parameters,
                                       self.initial_parameters)
            # save the values
            self.initial_parameters = initial_parameters
        galaxy = self.initial_parameters  # assign
        self.logger.info("Initial parameters : %s", self.initial_parameters)

        dim_p = np.size(self.initial_parameters)

        # By default, try to guess all parameters
        should_guess_flags = np.ones(dim_p)  # 0: we know it / 1: try to guess

        # if using an input image
        if flux_profile == 'user':
            if known_parameters is None:
                self.logger.warning('With an input image it is advised to fix '
                                    'the inclination with known_parameters')

        # Flag parameters that we manually specified and don't need to guess
        if known_parameters is not None:
            if len(known_parameters) < dim_p:
                raise ValueError("known_parameters must have a length >= %d"
                                 % dim_p)
        else:
            known_parameters = GalaxyParameters()
        if not self.line:
            # Any number works (non-NaN), this is just config
            known_parameters.line_delta = 1.
            known_parameters.line_ratio = 1.
        self.known_parameters = known_parameters

        # fix some known parameters
        for idx in range(dim_p):
            parameter = known_parameters[idx]
            if not math.isnan(parameter):
                should_guess_flags[idx] = 0
                sign = math.copysign(1., parameter)
                self.min_boundaries[idx] = parameter * (1 - eps * sign) - eps
                self.max_boundaries[idx] = parameter * (1 + eps * sign) + eps
                galaxy[idx] = parameter

        self.logger.info("Min : %s", self.min_boundaries)
        self.logger.info("Max : %s", self.max_boundaries)

        ## The actual MCMC
        # Tweaking of the random_amplitude vector (Kp coeff, as pid)
        random_amplitude = np.array(
            np.sqrt((self.min_boundaries - self.max_boundaries) ** 2 / 12.)
            * dim_p / dim_d)
        # flux : damped, since we start at ~flux
        # and flux is an integrated quantity
        random_amplitude[3] *= 0.1
        random_amplitude[9] *= 1.5  # velocity dispersion
        random_amplitude *= 3

        # Scale the MCMC if needed // allowing vectors
        if random_scale is not None:
            if np.size(random_scale) != 1:
                math_utils.merge_where_nan(random_scale,
                                           np.ones_like(random_amplitude))
            random_amplitude = random_amplitude * random_scale
        self.random_scale = random_scale

        # Zero the random amplitude where parameters are known
        random_amplitude = random_amplitude * should_guess_flags
        #self.logger.info("Random amplitude : %s", str(random_amplitude))

        # NotImplemented (yet)
        fit_2d = None

        # Chi2 statistics, default
        if chi_stat == 'gaussian':
            compute_chi = lambda g: \
                np.nansum(self._compute_error_gauss(g) ** 2)
            self.Ndegree = (dim_d - dim_p - 1)  # degrees of freedom

        # https://heasarc.gsfc.nasa.gov/xanadu/xspec/manual/XSappendixStatistics.html
        # Mighell http://adsabs.harvard.edu/abs/1999ApJ...518..380M
        # Humphrey 2009, http://adsabs.harvard.edu/abs/2009ApJ...693..822H

        # Mighell
        if chi_stat == 'Mighell':
            compute_chi = lambda g: \
                np.nansum(self._compute_error_Mighell(
                    g, flux_profile, rotation_curve, disk_dispersion))
            self.Ndegree = (dim_d - dim_p)

        # C-statistic
        #if chi_stat == 'Cstat':
        #    compute_chi = lambda g: \
        #        np.nansum(self._compute_error_cstat(
        #            g, flux_profile, rotation_curve, disk_dispersion))
        #    self.Ndegree = (dim_d - dim_p)

        # Neyman
        if chi_stat == 'Neyman':
            compute_chi = lambda g: \
                np.nansum(self._compute_error_Neyman(
                    g, flux_profile, rotation_curve, disk_dispersion))
            self.Ndegree = (dim_d - dim_p)

        # Pearson
        #if chi_stat == 'Pearson':
        #    compute_chi = lambda g: \
        #        np.nansum(self._compute_error_Pearson(
        #            g, flux_profile, rotation_curve, disk_dispersion))
        #    self.Ndegree = (dim_d - dim_p)

        # Some useful vars
        count_alpha_ok = 0
        total_iterations = 0
        ki_min = sys.maxint  # that way, it can only go down
        galaxy_old = galaxy.copy()
        galaxy_new = galaxy_old.copy()
        chi_old = compute_chi(galaxy)

        self.logger.info("Starting with χ² = %f", chi_old / self.Ndegree)

        # Create an empty chain holding the galaxy parameters
        # and the reduced chi
        names_param = []
        names_param.extend(GalaxyParameters.names)
        names_param.append('reduced_chi')
        # names_param = [
        #     'x', 'y', 'z', 'flux', 'radius', 'inclination', 'pa',
        #     'turnover_radius', 'maximum_velocity', 'velocity_dispersion',
        #     'reduced_chi',
        # ]
        types_param = ['float32' for i in range(dim_p + 1)]
        dt = zip(names_param, types_param)
        chain = np.zeros(max_iterations, dtype=dt)  # intensive operation !

        rate = 0.
        while (total_iterations < max_iterations * 20
               or rate > min_acceptance_rate) \
                and (count_alpha_ok < max_iterations):

            # Update the loop conditions
            total_iterations += 1
            rate = count_alpha_ok * 100. / total_iterations

            # Cauchy jumping
            random_uniforms = np.random.uniform(-np.pi / 2., np.pi / 2.,
                                                size=dim_p)
            galaxy_new = galaxy_old + random_amplitude * np.tan(random_uniforms)

            # Compute the reflection of the parameters
            if galaxy_new.pa > 90.:
                galaxy_new.pa -= 180.
                galaxy_new.maximum_velocity *= -1
            if galaxy_new.pa < -90.:
                galaxy_new.pa += 180.
                galaxy_new.maximum_velocity *= -1

            # Check whether the parameters are too close to the boundaries
            too_close_to_max = np.abs((galaxy_new - self.max_boundaries)
                                      / self.max_boundaries) < eps
            too_close_to_min = np.abs((galaxy_new - self.min_boundaries)
                                      / self.min_boundaries) < eps
            too_close_to_boundaries = too_close_to_min + too_close_to_max

            # This is really verbose ; trying out color instead.
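            # Note on the Cauchy jump above (added for illustration) :
            # if U ~ Uniform(-π/2, π/2), then tan(U) follows a standard
            # Cauchy distribution, so most proposed steps are small but the
            # heavy tails occasionally produce large jumps that can help the
            # walker escape local χ² minima. E.g. with an amplitude of 0.1 :
            #   0.1 * tan(0.10) ≈ 0.01   (typical small step)
            #   0.1 * tan(1.47) ≈ 0.99   (rare large excursion)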
            # if np.any(too_close_to_max):
            #     self.logger.stdev("Parameters close to max boundaries : %s"
            #                       % (too_close_to_max * galaxy_new))
            # if np.any(too_close_to_min):
            #     self.logger.stdev("Parameters close to min boundaries : %s"
            #                       % (too_close_to_min * galaxy_new))

            # Make sure we're inside the preset boundaries
            in_boundaries = (galaxy_new >= self.min_boundaries) \
                * (galaxy_new <= self.max_boundaries)

            if in_boundaries.all():
                # Compute the convolved model with the new parameters
                chi_new = compute_chi(galaxy_new)
                if chi_new < 1e-3:
                    print(chi_new, galaxy_new)
                    self.logger.error('Chi2 is 0, quit')
                    sys.exit()

                # Compute the ratio of likelihood a posteriori :
                # exp( (old - new) / 2 )
                likelihood = math_utils.safe_exp(-0.5 * (chi_new - chi_old))

                # likelihood > 1 => new is better
                # likelihood < 1 => accept or reject
                alpha = np.random.rand()

                # Conservation test
                if alpha < likelihood:
                    count_alpha_ok += 1

                    # Save the minimum
                    if ki_min > chi_new:
                        ki_min = chi_new
                        count_min = count_alpha_ok

                    galaxy_old = galaxy_new
                    chi_old = chi_new

                    vect = np.append(galaxy_new, chi_new / self.Ndegree)
                    chain[count_alpha_ok - 1] = np.float32(vect)

                    if verbose:
                        info = "{count:5d} MIN={count_min:5d} {rate:2.0f}% " \
                               "χ²={ki:3.6f}>{ki_min:3.6f} {params:s}"
                        info = info.format(
                            count=count_alpha_ok,
                            count_min=count_min,
                            ki=chi_new / self.Ndegree,
                            ki_min=ki_min / self.Ndegree,
                            rate=rate,
                            params=galaxy_new.colored_info(too_close_to_boundaries)
                        )
                        if self.redshift is not None:
                            mdyn = self._compute_mass(galaxy_new.radius,
                                                      galaxy_new.rv,
                                                      galaxy_new.max_vel)
                            info += "mdyn={mass:4.2e}"
                            info = info.format(mass=mdyn)
                            self.dynamical_mass = mdyn
                        print(info)

        # Raise if no useful iteration was run
        if rate == 0.:
            self.logger.debug("Last Galaxy Parameters : %s", galaxy_new)
            raise RuntimeError("No useful iteration was run. "
                               "Try with higher max_iterations?")

        # Data correction
        vmax_sign = (chain['maximum_velocity'] < 0)
        pa_correction = np.where(vmax_sign, chain['pa'] + 180., chain['pa'])
        pa_correction = np.where(pa_correction > 180,
                                 pa_correction - 360, pa_correction)
        chain['pa'] = pa_correction
        chain['maximum_velocity'] = np.abs(chain['maximum_velocity'])

        # Report
        self.logger.info("Iterations report : %d Total, %d OK, %d%% Rate",
                         total_iterations, count_alpha_ok, rate)
        self.logger.info("Storing results as parameters...")

        # Acceptance Rate
        self.logger.info("self.acceptance_rate : useful iterations count / "
                         "total iterations count")
        self.acceptance_rate = rate

        # Store the chain
        self.logger.info("self.chain : full Markov chain")
        good_idx = np.where(chain['reduced_chi'] != 0)
        self.chain = chain[good_idx]

        # Store the PSF 3D
        try:
            self.psf3d = Cube(self.instrument.psf3d)
        except AttributeError:
            pass

        # Extract the galaxy parameters from the chain, and store them
        self.best_parameters_from_chain(method_chain,
                                        last_fraction=last_chain_fraction,
                                        percentile=percentile)

        # Create the output cubes
        self.logger.info("self.convolved_cube : simulated convolved cube "
                         "from found galaxy parameters")
        self.convolved_cube = self.create_convolved_cube(self.galaxy, shape)
        self.convolved_cube.header = self.cube.header
        self.logger.info("self.deconvolved_cube : deconvolved cube "
                         "from found galaxy parameters")
        self.deconvolved_cube = self.create_clean_cube(self.galaxy, shape)
        self.deconvolved_cube.header = self.cube.header
        self.logger.info("self.residuals_cube : diff between actual data and "
                         "convolved cube, scaled by stdev margin")
        self.residuals_cube = (self.cube - self.convolved_cube) \
            / self.error_cube  # * np.mean(variance_cube)
        self.residuals_cube.header = self.cube.header

        # Compute the observed maps
        # (moment maps from the cube convolved with the 3D PSF)
        _ = self._make_moment_maps(self.convolved_cube, mask=True)

        # Average of residuals, normalized to sigma_mu
        nz = self.residuals_cube.shape[0]
        self.residuals_map = (self.residuals_cube.data.sum(0) / nz) \
            * np.sqrt(nz)

        # Compute the χ²
        ki_reduced = compute_chi(self.galaxy) / (dim_d - dim_p - 1)
        self.logger.info("Best χ² : %f", ki_reduced)

        # Show a plot of the chain if verbose, to draw attention to the chain
        if verbose:
            # Sometimes, when the number of iterations is low, plotting fails.
            # It is a complex issue with matplotlib, so we're 'try'-wrapping it.
            try:
                self.plot_mcmc(adapt_range='5stdev')
            except:
                pass

        return self.galaxy
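    # Illustrative sketch (added) of the three chi2 statistics named in
    # run_mcmc(), evaluated on toy arrays ; `D`, `M` and `e` are hypothetical
    # stand-ins for the data cube, convolved model cube and variance.
    #
    #   D, M, e = np.array([4., 9.]), np.array([5., 8.]), np.array([1., 1.])
    #   chi_gaussian = np.nansum((D - M) ** 2 / e)                  # 2.0
    #   chi_neyman   = np.nansum((D - M) ** 2 / np.maximum(D, 1.))  # 0.361
    #   chi_mighell  = np.nansum((D + np.minimum(D, 1.) - M) ** 2
    #                            / (D + 1.))                        # 0.4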
    def best_parameters_from_chain(self, method_chain, last_fraction,
                                   percentile=None):
        """
        Computes the best-fit galaxy parameters from the chain, using the
        specified method_chain.

        method_chain: string
            The method to use to extract the fittest parameters from the
            chain.
                'last' (default) : mean of the last last_fraction (%)
                parameters of the chain
                'chi_sorted' : mean of the last_fraction (%) best-fit
                parameters of the chain
                'chi_min' : mean of the last_fraction (%) of the chain
                around the minimum chi
        last_fraction: float
            Last fraction (in %) of the chain used to determine the best-fit
            parameters.
        percentile: float (None as default)
            None : the errors on the parameters are computed as the standard
            deviation around the median.
            float : the percentile (e.g. the 68th or 95th percentile) to be
            used for the errors on the parameters.
            #fixme: in which case returns the lower and upper values.

        Returns the galaxy parameters and the stdev.
        """
        self.method = method_chain
        self.chain_fraction = last_fraction
        self.percentile = percentile

        if self.chain is None:
            raise RuntimeError("No chain! Run .run_mcmc() first.")

        chain_list = np.array(self.chain.tolist())
        # remove the last element of the chain, the reduced chi
        params = chain_list[:, :-1]
        chain_size = np.size(self.chain, 0)
        # fixme: need to ignore the zeros.
        # number of samples (last_fraction (%) of total)
        n = int(chain_size * last_fraction / 100.)

        if method_chain == 'chi_min':
            min_chi_index = self._get_min_chi_index()
            sub_chain = params[min_chi_index - n / 2: min_chi_index + n / 2, :]
        elif method_chain == 'last':
            sub_chain = params[-n:, :]
        elif method_chain == 'chi_sorted':
            chain_sorted = np.sort(self.chain, order='reduced_chi')
            list_sorted = np.array(chain_sorted.tolist())
            sub_chain = list_sorted[:n, :-1]
            #best_parameter = best_parameter[:-1]
            #sigma_parameter = sigma_parameter[:-1]
        else:
            raise ValueError("Unsupported chain mean method_chain '%s'"
                             % method_chain)
        self.sub_chain = sub_chain

        # Compute the best parameters
        best_parameter = np.median(sub_chain, axis=0)
        self.galaxy = GalaxyParameters.from_ndarray(best_parameter)

        # Compute the errors on the parameters
        sigma_parameter = np.std(sub_chain, axis=0)
        if percentile is not None:
            error_parameter_upper = np.percentile(sub_chain,
                                                  50. + percentile / 2.,
                                                  axis=0)
            error_parameter_lower = np.percentile(sub_chain,
                                                  50. - percentile / 2.,
                                                  axis=0)
            self.galaxy.upper = \
                GalaxyParametersError.from_ndarray(error_parameter_upper)
            self.galaxy.lower = \
                GalaxyParametersError.from_ndarray(error_parameter_lower)
            self.galaxy.ICpercentile = percentile
        self.galaxy.stdev = GalaxyParametersError.from_ndarray(sigma_parameter)
        self.stdev = self.galaxy.stdev

        # Store the galaxy parameters and stdev
        self.logger.info("self.galaxy : fittest parameters : %s",
                         repr(self.galaxy))
        self.logger.info("self.stdev : parameters stdev : %s",
                         str(self.galaxy.stdev))

        return None
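    # Toy illustration (added) of the sub-chain selection above, on a
    # hypothetical 10-sample chain with last_fraction=60 :
    #
    #   params = np.arange(10)           # stand-in for the parameter rows
    #   n = int(10 * 60 / 100.)          # -> 6 samples
    #   params[-n:]                      # 'last'    : rows 4..9
    #   # 'chi_min' keeps rows min_chi_index - 3 .. min_chi_index + 3 ;
    #   # 'chi_sorted' keeps the 6 rows with the lowest reduced_chi.
    #   best = np.median(params[-n:])    # central value, robust to outliers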
    def create_clean_cube(self, galaxy, shape):
        """
        Creates a cube containing a clean simulation of a galaxy according to
        the provided parameters.

        galaxy: GalaxyParameters
            The parameters upon which the simulated galaxy will be built.
        shape: tuple of 3
            The 3D shape of the resulting cube. Eg: (21, 21, 21)

        Returns a HyperspectralCube.
        """
        # Create radial velocities and radial dispersions
        flux_cube, vz, vz_map, s_map, sig_map, sigz_disk_map, sig_intr = \
            self._compute_galaxy_model(galaxy, shape)
        # normalize to real flux
        flux_map = flux_cube.sum(0)
        flux_map = galaxy.flux * flux_map / flux_map.sum()
        self.true_flux_map = Cube(flux_map)
        self.true_velocity_map = Cube(vz_map)
        self.true_disp_map = Cube(s_map)

        return self._create_cube(shape, flux_map, vz_map, s_map,
                                 self.instrument.z_step_kms, zo=galaxy.z)
    def create_convolved_cube(self, galaxy, shape):
        """
        Creates a cube containing a convolved simulation of a galaxy according
        to the provided parameters.
        The convolution is done by the instrument you provided upon
        instantiation of this class.

        galaxy: GalaxyParameters
            The parameters upon which the simulated galaxy will be built.
        shape: tuple of 3
            The 3D shape of the resulting cube. Eg: (21, 21, 21)

        Returns a HyperspectralCube.
        """
        clean_cube = self.create_clean_cube(galaxy, shape)
        return self.instrument.convolve(clean_cube)
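    # Sketch (added) : simulating a mock observation from known parameters,
    # e.g. to eyeball a model before fitting. `gk` is a GalPaK3D instance and
    # the parameter values below are placeholders; the keyword names follow
    # the GalaxyParameters calls used in __init__ above.
    #
    #   from galpak import GalaxyParameters
    #   gp = GalaxyParameters(x=15, y=15, z=15, flux=1., radius=3.,
    #                         inclination=45., pa=30., turnover_radius=1.,
    #                         maximum_velocity=180., velocity_dispersion=40.)
    #   clean = gk.create_clean_cube(gp, gk.cube.shape)
    #   mock = gk.create_convolved_cube(gp, gk.cube.shape)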
    def import_chain(self, filepath):
        """
        Imports the chain stored in a .dat file, so that you may plot.
        WiP ! MAY NOT WORK !!
        """
        with open(filepath, 'r') as chain_data:
            self.chain = asciitable.read(chain_data.read(),
                                         Reader=asciitable.FixedWidth)
            self.best_parameters_from_chain('last', 60)

    NO_CHAIN_ERROR = "No chain to plot! Run .run_mcmc() or .import_chain() " \
                     "first."
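    # Round-trip sketch (added) : chains written by save() use asciitable's
    # FixedWidth format, so a saved chain can be re-read in a later session.
    # 'my_run_chain.dat' is the placeholder name that save('my_run') would
    # produce.
    #
    #   gk = GalPaK3D('my_galaxy.fits')
    #   gk.import_chain('my_run_chain.dat')   # WiP, as noted above
    #   gk.plot_mcmc('my_run_mcmc.png')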
    def plot_mcmc(self, filepath=None, plot_likelihood=False,
                  adapt_range='5stdev', fontsize=12):
        """
        Plot the MCMC chain details, and then either show it or save it to a
        file.

        filepath: string
            If specified, will write the plot to a file instead of showing it.
            The file will be created at the provided filepath, be it absolute
            or relative. The extension of the file must be either png or pdf.
        plot_likelihood: bool
            True to plot exp[-chi2] instead.
        adapt_range: string
            'boundaries' to adapt the range to the boundaries
            'minmax' to adapt the range to the min/max values
            '3stdev' to adapt the range to 3 x stdev
            '5stdev' [default] to adapt the range to 5 x stdev
        fontsize: int
            To change the font size.
        """
        if self.chain is None:
            raise RuntimeError(self.NO_CHAIN_ERROR)

        if filepath is not None:
            name, extension = os.path.splitext(filepath)
            supported_extensions = ['.png', '.pdf']
            if not extension in supported_extensions:
                raise ValueError("Extension '%s' is not supported, use %s.",
                                 extension, ', '.join(supported_extensions))

        if self.method == 'chi_sorted':
            chain = np.sort(self.chain, order=['reduced_chi'])
            chain_size = np.size(chain, 0)
            # number of samples (chain_fraction (%) of total)
            xmax = int(chain_size * self.chain_fraction / 100.)
            xmin = 0
        elif self.method == 'last':
            chain = self.chain
            chain_size = np.size(self.chain, 0)
            # number of samples (chain_fraction (%) of total)
            xmin = chain_size - int(chain_size * self.chain_fraction / 100.)
            xmax = chain_size
        elif self.method == 'chi_min':
            min_chi_index = self._get_min_chi_index()
            chain = self.chain
            chain_size = np.size(self.chain, 0)
            n = int(chain_size * self.chain_fraction / 100.)
            xmin = np.max([0, min_chi_index - n / 2])
            xmax = np.min([chain_size, min_chi_index + n / 2])

        plot.rcParams['font.size'] = fontsize
        plot.figure(1, figsize=(19, 9))
        names = chain.dtype.names
        plot.clf()  # clear current figure
        plot.subplots_adjust(wspace=0.32, hspace=0.32,
                             bottom=0.05, top=0.95, left=0.05, right=0.95)
        n = np.size(names)
        rows = 3
        cols = int(math.ceil(n / (rows - 1)))

        for i, par in enumerate(names):
            if i < n - 1:
                # Parameters
                plot.subplot2grid((rows, cols),
                                  (int(math.floor(i / cols)), i % cols))
                plot.plot(chain[par], zorder=0)
                plot.hlines(y=self.galaxy[i], xmin=xmin, xmax=xmax,
                            color='r', label=r'$\hat P$', zorder=10)
                plot.hlines(y=self.galaxy[i] + self.galaxy.stdev[i],
                            xmin=xmin, xmax=xmax, color='k', lw=2,
                            label=r'$\sigma$', zorder=10)
                plot.hlines(y=self.galaxy[i] - self.galaxy.stdev[i],
                            xmin=xmin, xmax=xmax, color='k', lw=2, zorder=10)
                if self.galaxy.lower is not None:
                    plot.hlines(y=self.galaxy.lower[i], xmin=xmin, xmax=xmax,
                                color='k', linestyles='dotted', lw=2,
                                zorder=10,
                                label='%d ' % self.percentile + '% CI')
                if self.galaxy.upper is not None:
                    plot.hlines(y=self.galaxy.upper[i], xmin=xmin, xmax=xmax,
                                color='k', linestyles='dotted', lw=2,
                                zorder=10)
                if i == n - 2:
                    #plot.legend(loc=0, fontsize=13)
                    #plot.legend(loc=0, fontsize=13,
                    #            bbox_to_anchor=(0.02, 0.3))
                    plot.legend(loc='lower center', fontsize=11,
                                bbox_to_anchor=(0.5, 0.0), ncol=3,
                                fancybox=True, shadow=True)
                if (adapt_range == '5stdev') and (self.galaxy.stdev is not None):
                    plot.ylim(self.galaxy[i] - 5. * self.galaxy.stdev[i],
                              self.galaxy[i] + 5. * self.galaxy.stdev[i])
                elif (adapt_range == '3stdev') and (self.galaxy.stdev is not None):
                    plot.ylim(self.galaxy[i] - 3. * self.galaxy.stdev[i],
                              self.galaxy[i] + 3. * self.galaxy.stdev[i])
                elif (adapt_range == 'boundaries') and \
                        (self.min_boundaries is not None
                         and self.max_boundaries is not None):
                    plot.ylim(self.min_boundaries[i], self.max_boundaries[i])
                elif (adapt_range == 'minmax') or (adapt_range is None):
                    plot.ylim(np.min(chain[par]), np.max(chain[par]))
                plot.title(par, fontsize=fontsize)
            else:
                # Last row : reduced chi
                plot.subplot2grid((rows, cols), (rows - 1, 0), colspan=cols)
                if plot_likelihood:
                    plot.plot(np.exp(-chain['reduced_chi']))
                    plot.ylim([0, 1])
                    plot.title(r'${\cal L}=\exp$[-$\chi^2$]')
                else:
                    plot.plot(np.log10(chain['reduced_chi']
                                       - np.min(chain['reduced_chi'])))
                    plot.title(r'$\log$ [$\chi^2 - \chi^2_{min}$]')
                plot.plot(0, -1, color='grey', lw=2, label=r'$1\sigma$')
                if self.percentile is not None:
                    plot.plot(0, -2, color='grey', ls='-.', lw=2,
                              label='%.2f' % self.percentile + '% CI')
                plot.xticks(np.arange(2.) / 2 * np.size(chain['flux']))

        if filepath is None:
            plot.show()
        else:
            plot.savefig(filepath)
    def plot_correlations(self, filepath=None, nbins=15, fontsize=12):
        """
        Plot the correlations between parameters.

        filepath: string
            If specified, will write the plot to a file instead of showing it,
            at the provided absolute or relative filepath.
            The extension of the file must be either png or pdf.
            Eg: filepath='/home/me/my_galaxy/galpak/run_correlations.png'
        fontsize: int
            To change the font size.
        """
        if self.chain is None:
            raise RuntimeError(self.NO_CHAIN_ERROR)

        if filepath is not None:
            name, extension = os.path.splitext(filepath)
            supported_extensions = ['.png', '.pdf']
            if not extension in supported_extensions:
                raise ValueError("Extension '%s' is not supported, use %s.",
                                 extension, ', '.join(supported_extensions))

        fig = plot.figure(1, figsize=(16, 9))
        plot.clf()  # clear current figure
        names = self.chain.dtype.names
        # 'x', 'y', 'z', 'flux', 'radius', 'inclination', 'pa', 'rv',
        # 'maximum_velocity', 'velocity_dispersion', 'reduced_chi'
        idx_selection = [4, 5, 6, 7, 8, 9]
        #plot.subplots_adjust(wspace=0.25, hspace=0.25,
        #                     bottom=0.1, top=0.85, left=0.1, right=0.85)
        rows = np.shape(idx_selection)[0] - 1
        cols = np.shape(idx_selection)[0] - 1
        plot.rcParams['font.size'] = fontsize

        for i in np.arange(rows):
            for j in np.arange(i, cols):
                jj = idx_selection[i]
                ii = idx_selection[j + 1]
                xdat = self.sub_chain[:, ii]
                ydat = self.sub_chain[:, jj]  # chain[names[jj]]

                x_edges = np.array([self.galaxy[ii] - self.stdev[ii] * 5,
                                    self.galaxy[ii] + self.stdev[ii] * 5.])
                y_edges = np.array([self.galaxy[jj] - self.stdev[jj] * 5,
                                    self.galaxy[jj] + self.stdev[jj] * 5.])
                extend = [x_edges[0], x_edges[1], y_edges[0], y_edges[1]]

                # plot parameters
                #plot.plot(xdat, ydat, lw=1)
                # from top,left (y,x)
                plot.subplot2grid((rows, cols), (i, j))
                #plot.plot(xdat, ydat, 'k.', lw=1, alpha=0.5)
                #plot.axis(extend)
                plot.axvline(self.galaxy[ii], ls='-', c='k')
                plot.axvline(self.galaxy[ii] + self.galaxy.stdev[ii],
                             ls='-.', c='grey')
                plot.axvline(self.galaxy[ii] - self.galaxy.stdev[ii],
                             ls='-.', c='grey')
                plot.axhline(self.galaxy[jj], ls='-', c='k')
                plot.axhline(self.galaxy[jj] + self.galaxy.stdev[jj],
                             ls='-.', c='grey')
                plot.axhline(self.galaxy[jj] - self.galaxy.stdev[jj],
                             ls='-.', c='grey')

                image2d, _edg = np.histogramdd(
                    (ydat, xdat), bins=nbins,
                    range=((y_edges[0], y_edges[1]),
                           (x_edges[0], x_edges[1])))
                plot.contour(image2d, vmin=np.min(image2d),
                             vmax=bn.nanmax(image2d), extent=extend,
                             color='k')
                #plot.plot(xdat, ydat, 'k.', alpha=0.5, lw=1)

                xticks = np.rint(2 * np.arange(4) / 4.
                                 * (x_edges[1] - x_edges[0])
                                 + x_edges[0] * 2) / 2.0
                yticks = np.rint(2 * np.arange(4) / 4.
                                 * (y_edges[1] - y_edges[0])
                                 + y_edges[0] * 2) / 2.0
                plot.xticks(np.unique(xticks))
                plot.yticks(np.unique(yticks))
                plot.xlim(x_edges)
                plot.ylim(y_edges)

                if i == j:
                    plot.xlabel(names[ii])
                    plot.ylabel(names[jj])

        fig.subplots_adjust(wspace=0.4)

        if filepath is None:
            plot.show()
        else:
            plot.savefig(filepath)
    def plot_true_vfield(self, filepath=None, mask=None, contours=None,
                         fontsize=12, slitwidth=3):
        """
        Plot the intrinsic 2D maps and the 1D profiles along the major axis.

        mask: 2d nd-array [optional]
            Mask for display purposes.
            None [default] : the default mask is flux > max(flux)/30.
            1 : do not apply any mask
            nd-array : the mask to be used
        contours: 2d nd-array [optional]
            External map to overlay
        fontsize: int
            To change the font size
        slitwidth: int [default: 3]
            The slit width used to extract the 1D profile
        """
        fmap = self.true_flux_map.data
        vmap = self.true_velocity_map.data
        smap = self.true_disp_map.data
        vmax = self.galaxy.maximum_velocity

        if mask is None:
            # default mask is flux > max(flux)/30.
            mask = (fmap > bn.nanmax(fmap) / 30.) * (fmap != 0)

        xc = self.galaxy.x
        yc = self.galaxy.y
        pixscale = self.instrument.xy_step
        x0 = pixscale * (-xc)
        x1 = pixscale * (np.shape(fmap)[1] - xc)
        y0 = pixscale * (-yc)
        y1 = pixscale * (np.shape(fmap)[0] - yc)
        extent = [x0, x1, y0, y1]

        plot.rcParams['font.size'] = fontsize
        plot.figure(1)
        plot.clf()

        plot.subplot(2, 3, 1)
        ax1 = self._plot2dimage(fmap / mask, vmin=0, vmax=np.nanmax(fmap),
                                xlabel=r'$\Delta \alpha$(")',
                                ylabel=r'$\Delta \delta$(")',
                                interpolation='nearest', extent=extent,
                                contour=contours, title='True Flux map')
        plot.subplot(2, 3, 2)
        ax2 = self._plot2dimage(vmap / mask, vmin=np.nanmin(vmap),
                                vmax=bn.nanmax(vmap),
                                xlabel=r'$\Delta \alpha$(")',
                                ylabel=r'$\Delta \delta$(")',
                                interpolation='nearest', extent=extent,
                                contour=contours, title='True Vel. map')
        plot.subplot(2, 3, 3)
        s1 = bn.nanmax(smap)
        try:
            s0 = bn.nanmedian(smap) - 2 * bn.nanstd(smap)
        except:
            s0 = np.nanmean(smap) - 2 * np.nanstd(smap)
        ax3 = self._plot2dimage(smap / mask, vmin=s0, vmax=s1,
                                xlabel=r'$\Delta \alpha$(")',
                                ylabel=r'$\Delta \delta$(")',
                                extent=extent, contour=contours,
                                interpolation='nearest',
                                title='True Disp. map')

        plot.subplot(2, 3, 4)
        xx, slice_v = self._slit(fmap, slitwidth, ax=ax1)
        good = (slice_v != 0)
        plot.plot(pixscale * xx[good], np.log10(slice_v[good]), 'k-')
        plot.xlim([-3, 3])
        plot.ylim([np.min(np.log10(slice_v[good])),
                   bn.nanmax(np.log10(1.3 * slice_v[good]))])
        plot.xlabel(r'$\delta x$')
        plot.ylabel(r'$\log$ I(r)')

        plot.subplot(2, 3, 5)
        xx, slice_v = self._slit(vmap, slitwidth, ax=ax2)
        rotation_curve = [pixscale * xx[good], slice_v[good]]
        plot.plot(pixscale * xx[good], slice_v[good], 'k-')
        plot.xlim([-3, 3])
        plot.ylim([-vmax, vmax])
        plot.axhline(vmax * np.sin(np.radians(self.galaxy.inclination)),
                     color='k', ls='--')
        plot.axhline(-vmax * np.sin(np.radians(self.galaxy.inclination)),
                     color='k', ls='--')
        plot.xlabel(r'$\delta x$')
        #plot.ylabel(r'$V_{los}(x)$')

        plot.subplot(2, 3, 6)
        xx, slice_v = self._slit(smap, slitwidth, ax=ax3)
        plot.plot(pixscale * xx[good], slice_v[good], 'k-')
        plot.xlim([-3, 3])
        plot.ylim([s0, s1])
        plot.xlabel(r'$\delta x$')

        if filepath is None:
            plot.show()
        else:
            plot.savefig(filepath)
            rotation_name = re.sub('true_maps', 'true_Vrot',
                                   filepath[:-4]) + '.dat'
            asciitable.write(zip(rotation_curve[0], rotation_curve[1]),
                             output=rotation_name,
                             Writer=asciitable.FixedWidth,
                             names=['dx_arcsec', 'v_kms'])

    def plot_obs_vfield(self, filepath=None, mask=None, contours=None,
                        fontsize=12, slitwidth=3):
        """
        Plot the observed 2D maps and the 1D profiles along the major axis.

        mask: 2d nd-array [optional]
            Mask for display purposes.
            None [default] : the default mask is flux > max(flux)/30.
            1 : do not apply any mask
            nd-array : the mask to be used
        contours: 2d nd-array [optional]
            External map to overlay
        fontsize: int
            To change the font size
        slitwidth: int [default: 3]
            The slit width used to extract the 1D profile
        """
        Fmap, Vmap, Smap = self._make_moment_maps(self.convolved_cube,
                                                  mask=True)
        Fmap = self.obs_flux_map
        Vmap = self.obs_velocity_map
        Smap = self.obs_disp_map
        fmap = Fmap.data
        vmap = Vmap.data
        smap = Smap.data
        vmax = self.galaxy.maximum_velocity

        if mask is None:
            # default mask is flux > max(flux)/30.
            mask = (fmap > bn.nanmax(fmap) / 30.) * (fmap != 0)

        xc = self.galaxy.x
        yc = self.galaxy.y
        pixscale = self.instrument.xy_step
        x0 = pixscale * (-xc)
        x1 = pixscale * (np.shape(fmap)[1] - xc)
        y0 = pixscale * (-yc)
        y1 = pixscale * (np.shape(fmap)[0] - yc)
        extent = [x0, x1, y0, y1]

        plot.rcParams['font.size'] = fontsize
        plot.figure(1)
        plot.clf()

        plot.subplot(2, 3, 1)
        ax1 = self._plot2dimage(fmap / mask, vmin=0, vmax=np.nanmax(fmap),
                                xlabel=r'$\Delta \alpha$(")',
                                ylabel=r'$\Delta \delta$(")',
                                interpolation='nearest', extent=extent,
                                contour=contours, title='Obs. Flux map')
        plot.subplot(2, 3, 2)
        ax2 = self._plot2dimage(vmap / mask, vmin=np.nanmin(vmap),
                                vmax=bn.nanmax(vmap),
                                xlabel=r'$\Delta \alpha$(")',
                                ylabel=r'$\Delta \delta$(")',
                                interpolation='nearest', extent=extent,
                                contour=contours, title='Obs. Vel. map')
        plot.subplot(2, 3, 3)
        s1 = bn.nanmax(smap)
        try:
            s0 = bn.nanmedian(smap) - 2 * bn.nanstd(smap)
        except:
            s0 = np.nanmean(smap) - 2 * np.nanstd(smap)
        ax3 = self._plot2dimage(smap / mask, vmin=s0, vmax=s1,
                                xlabel=r'$\Delta \alpha$(")',
                                ylabel=r'$\Delta \delta$(")',
                                extent=extent, contour=contours,
                                interpolation='nearest',
                                title='Obs. Disp. map')

        plot.subplot(2, 3, 4)
        xx, slice_v = self._slit(fmap, slitwidth, ax=ax1)
        good = (slice_v != 0)
        plot.plot(pixscale * xx[good], np.log10(slice_v[good]), 'k-')
        plot.xlim([-3, 3])
        plot.ylim([np.min(np.log10(slice_v[good])),
                   bn.nanmax(np.log10(1.3 * slice_v[good]))])
        plot.xlabel(r'$\delta x$')
        plot.ylabel(r'$\log$ I(r)')

        plot.subplot(2, 3, 5)
        xx, slice_v = self._slit(vmap, slitwidth, ax=ax2)
        plot.plot(pixscale * xx[good], slice_v[good], 'k-')
        plot.xlim([-3, 3])
        plot.ylim([-vmax, vmax])
        plot.axhline(vmax * np.sin(np.radians(self.galaxy.inclination)),
                     color='k', ls='--')
        plot.axhline(-vmax * np.sin(np.radians(self.galaxy.inclination)),
                     color='k', ls='--')
        plot.xlabel(r'$\delta x$')
        #plot.ylabel(r'$V_{los}(x)$')

        plot.subplot(2, 3, 6)
        xx, slice_v = self._slit(smap, slitwidth, ax=ax3)
        plot.plot(pixscale * xx[good], slice_v[good], 'k-')
        plot.xlim([-3, 3])
        plot.ylim([s0, s1])
        plot.xlabel(r'$\delta x$')

        if filepath is None:
            plot.show()
        else:
            plot.savefig(filepath)
    def plot_images(self, filepath=None, z_crop=None):
        """
        Plot a mosaic of images of the cropped (along z) cubes,
        and then either show it or save it to a file.

        filepath: string
            If specified, will write the plot to a file instead of showing it.
            The file will be created at the provided absolute or relative
            filepath. The extension of the file must be either png or pdf.
        z_crop: None|int
            The maximum and total length of the crop (in pixels) along z,
            centered on the galaxy's z position.
            If you provide zero or an even value (2n),
            the closest bigger odd value will be used (2n+1).
            By default, will not crop.
        """
        if self.chain is None:
            raise RuntimeError(self.NO_CHAIN_ERROR)

        if filepath is not None:
            name, extension = os.path.splitext(filepath)
            supported_extensions = ['.png', '.pdf']
            if not extension in supported_extensions:
                raise ValueError("Extension '%s' is not supported, "
                                 "you may use one of %s",
                                 extension, ', '.join(supported_extensions))

        self._plot_images(self.cube, self.galaxy, self.convolved_cube,
                          self.deconvolved_cube, self.residuals_cube,
                          z_crop=z_crop)

        if filepath is None:
            plot.show()
        else:
            plot.savefig(filepath)
    def _plot_images(self, cube, galaxy, convolved_cube, deconvolved_cube,
                     residuals_cube, z_crop=None):
        """
        Plot a mosaic of images of the cropped (along z) cubes.

        z_crop: None|int
            The maximum and total length of the crop (in pixels) along z,
            centered on the galaxy's z position.
            If you provide zero or an even value (2n),
            the closest bigger odd value will be used (2n+1).
            By default, will not crop.
        """
        if z_crop is None:
            zmin = 0
            zmax = cube.shape[0] - 1
        else:
            if not (z_crop & 1):
                z_crop += 1
            z0 = galaxy.z
            zd = (z_crop - 1) / 2
            zmin = max(0, z0 - zd)
            zmax = z0 + zd + 1

        fig = plot.figure(1, figsize=(16, 9))
        plot.clf()
        plot.subplots_adjust(wspace=0.25, hspace=0.25,
                             bottom=0.05, top=0.95, left=0.05, right=0.95)

        # MEASURE
        sub = fig.add_subplot(2, 2, 1)
        sub.set_title('Measured')
        measured_cube_cropped = cube.data[zmin:zmax, :, :]
        image_measure = (measured_cube_cropped.sum(0)
                         / measured_cube_cropped.shape[0])
        plot.imshow(image_measure, interpolation='nearest', origin='lower')
        plot.xticks(fontsize=8)
        plot.yticks(fontsize=8)
        colorbar = plot.colorbar()
        colorbar.ax.tick_params(labelsize=8)

        # CONVOLVED
        sub = fig.add_subplot(2, 2, 2)
        sub.set_title('Convolved')
        convolved_cube_cropped = convolved_cube.data[zmin:zmax, :, :]
        image_convolved = (convolved_cube_cropped.sum(0)
                           / convolved_cube_cropped.shape[0])
        plot.imshow(image_convolved, interpolation='nearest', origin='lower')
        plot.xticks(fontsize=8)
        plot.yticks(fontsize=8)
        colorbar = plot.colorbar()
        colorbar.ax.tick_params(labelsize=8)

        # DECONVOLVED
        sub = fig.add_subplot(2, 2, 3)
        sub.set_title('Deconvolved')
        deconvolved_cube_cropped = deconvolved_cube.data[zmin:zmax, :, :]
        image_deconvolved = (deconvolved_cube_cropped.sum(0)
                             / deconvolved_cube_cropped.shape[0])
        plot.imshow(image_deconvolved, interpolation='nearest',
                    origin='lower')
        plot.xticks(fontsize=8)
        plot.yticks(fontsize=8)
        colorbar = plot.colorbar()
        colorbar.ax.tick_params(labelsize=8)

        # ERROR
        sub = fig.add_subplot(2, 2, 4)
        sub.set_title('Error')
        square_error_cube_cropped = residuals_cube.data[zmin:zmax, :, :]
        nz = square_error_cube_cropped.shape[0]
        # normalized error image, in sigmas :
        image_error = (square_error_cube_cropped.sum(0) / nz) * np.sqrt(nz)
        #vmin = np.amin(image_measure)
        #vmax = np.amax(image_measure)
        #plot.imshow(image_error, interpolation='nearest', origin='lower',
        #            vmin=vmin, vmax=vmax)
        plot.imshow(image_error, vmin=-2.5, vmax=2.5,
                    interpolation='nearest', origin='lower')
        plot.xticks(fontsize=8)
        plot.yticks(fontsize=8)
        colorbar = plot.colorbar()
        colorbar.ax.tick_params(labelsize=8)

        return fig

    def film_images(self, name, frames_skipped=0, fps=25):
        """
        This is still a WiP.
        TODO:
            - print => logging
            - heavily document how to solve the ffmpeg issue
              (even if it's not our job)

        Generates a video of the evolution of plot_images() through the
        chain, skipping [frames_skipped] frames between each draw,
        at [fps] frames per second.

        .. warning::
            The generated file may take up a lot of disk space.

        Known issue with ffmpeg :
        https://stackoverflow.com/questions/17887117/python-matplotlib-basemap-animation-with-ffmpegwriter-stops-after-820-frames
        Fix :
            sudo apt-add-repository ppa:jon-severinsson/ffmpeg
            sudo apt-get update
            sudo apt-get install ffmpeg
        """
        import matplotlib.animation as animation

        # Initial plot
        fig = self._plot_images(self.cube, self.galaxy, self.convolved_cube,
                                self.deconvolved_cube, self.residuals_cube)

        # Convert the chain to a list of parameters
        chain_list = np.array(self.chain.tolist())
        params = chain_list[:, :-1]  # remove the reduced chi (last element)

        # Sanity check (otherwise, divisions by 0 will happen)
        if len(params) < 2:
            raise RuntimeError("Chain is not long enough for video.")

        self.logger.info("Encoding video...")

        def animate(i, skipped, count, me, chain):
            """
            animation.FuncAnimation requires a function to generate the
            figure on each frame.
            This is the bottleneck of our film generation.

            i: int
                0 0 1 2 3 ... up to [count]
            skipped: int
                Between each frame, skip [skipped] parameters in the chain
            count: int
                The total [count] of frames to show.
            me: GalPaK3D
                Our local self.
            chain: ndarray
                The list of parameters, as long as the MCMC chain.
            """
            # Get the parameters from the chain
            index = int(math.floor(i / (1 + skipped)))
            galaxy = GalaxyParameters.from_ndarray(chain[index])

            # Print the progression in %
            sys.stdout.write("\r%d: %2.2f%%" % (i, 100. * i / (count - 1)))
            sys.stdout.flush()

            # Compute the cubes
            deconvolved_cube = me.create_clean_cube(galaxy, me.cube.shape)
            convolved_cube = me.instrument.convolve(deconvolved_cube.copy())
            residuals_cube = (me.cube - convolved_cube) / me.error_cube

            # Plot and return the plot
            # (this is the expensive step, and it can be optimized further)
            return me._plot_images(me.cube, galaxy, convolved_cube,
                                   deconvolved_cube, residuals_cube)

        if frames_skipped < 0:
            frames_skipped *= -1
        frames_count = int(math.floor(len(params) / (1. + frames_skipped)))

        ani = animation.FuncAnimation(fig, animate, frames_count,
                                      fargs=(int(frames_skipped),
                                             frames_count, self, params),
                                      repeat=False)

        # Still unsure whether that metadata is really written in the file
        metadata = {
            'title': 'GalPaK\'s cubes timelapse',
            'author': 'galpak',
        }
        writer = animation.FFMpegWriter(fps=fps)
        ani.save(name + '_images.avi', writer=writer, metadata=metadata)
    def save(self, name, clobber=False):
        """
        Saves the results of the MCMC to files :

        - <name>_galaxy_parameters.txt
            A plain text representation of the parameters of the galaxy.
        - <name>_galaxy_parameters.dat
            An asciitable representation of the parameters of the galaxy.
        - <name>_chain.dat
            An asciitable representation of the Markov Chain.
            Each line holds one set of galaxy parameters and its associated
            reduced chi.
        - <name>_run_parameters.txt
            A plain text representation of the run parameters.
        - <name>_instrument.txt
            A plain text representation of the instrument parameters.
        - <name>_convolved_cube.fits
            A FITS file containing the PSF-convolved result cube.
        - <name>_deconvolved_cube.fits
            A FITS file containing the pre-convolution clean cube.
        - <name>_residuals_cube.fits
            A FITS file containing the diff between input data and simulation.
        - <name>_true_flux_map.fits
            A FITS file containing the true flux map [intrinsic]
        - <name>_true_vel_map.fits
            A FITS file containing the true velocity map [intrinsic]
        - <name>_true_sig_map.fits
            A FITS file containing the true dispersion map [intrinsic]
        - <name>_obs_flux_map.fits
            A FITS file containing the observed flux map
        - <name>_obs_vel_map.fits
            A FITS file containing the observed velocity map
        - <name>_obs_sig_map.fits
            A FITS file containing the observed dispersion map
        - <name>_images.png
            A PNG image generated by the ``plot_images`` method.
            Note: the clobber option is always true for this file.
        - <name>_mcmc.png
            A PNG image generated by the ``plot_mcmc`` method.
            Note: the clobber option is always true for this file.
        - <name>_correlations.png
            A PNG image generated by the ``plot_correlations`` method.
            Note: the clobber option is always true for this file.
        - <name>_true_maps.png
            A PNG image generated by the ``plot_true_vfield`` method.
            Note: the clobber option is always true for this file.

        The .dat files can be easily read using asciitable and its
        ``asciitable.FixedWidth`` Reader : ::

            asciitable.read('example.chain.dat', Reader=asciitable.FixedWidth)

        .. warning::
            The generated files are not compressed and may take up a lot of
            disk space.

        name: string
            An absolute or relative name that will be used as prefix for the
            save files.
            Eg: 'my_run', or '/home/me/science/my_run'.
        clobber: bool
            When set to true, will OVERWRITE existing files.
        """
        if self.chain is None:
            raise RuntimeError("Nothing to save! Run .run_mcmc() first.")

        filename = '%s_galaxy_parameters.txt' % name
        self._save_to_file(filename, self.galaxy.long_info(), clobber)
        filename = '%s_galaxy_parameters.dat' % name
        self._save_to_file(filename, self.galaxy.structured_info(), clobber)
        filename = '%s_chain.dat' % name
        self._save_to_file(filename, self._chain_as_asciitable(), clobber)
        filename = '%s_run_parameters.txt' % name
        self._save_to_file(filename, self.__str__(), clobber)
        filename = '%s_instrument.txt' % name
        self._save_to_file(filename, self.instrument.__str__(), clobber)
        filename = '%s_convolved_cube.fits' % name
        self.convolved_cube.write_to(filename, clobber)
        filename = '%s_deconvolved_cube.fits' % name
        self.deconvolved_cube.write_to(filename, clobber)
        filename = '%s_residuals_cube.fits' % name
        self.residuals_cube.write_to(filename, clobber)
        filename = '%s_3Dkernel.fits' % name
        self.psf3d.write_to(filename, clobber)
        filename = '%s_true_flux_map.fits' % name
        self.true_flux_map.write_to(filename, clobber)
        filename = '%s_true_vel_map.fits' % name
        self.true_velocity_map.write_to(filename, clobber)
        filename = '%s_true_disp_map.fits' % name
        self.true_disp_map.write_to(filename, clobber)
        filename = '%s_obs_flux_map.fits' % name
        self.obs_flux_map.write_to(filename, clobber)
        filename = '%s_obs_vel_map.fits' % name
        self.obs_velocity_map.write_to(filename, clobber)
        filename = '%s_obs_disp_map.fits' % name
        self.obs_disp_map.write_to(filename, clobber)
        filename = '%s_images' % name
        self.plot_images(filename + '.png')
        self.plot_images(filename + '.pdf')
        filename = '%s_mcmc' % name
        self.plot_mcmc(filename + '.png')
        self.plot_mcmc(filename + '.pdf')
        filename = '%s_correlations' % name
        self.plot_correlations(filename + '.png')
        self.plot_correlations(filename + '.pdf')
        filename = '%s_true_maps' % name
        self.plot_true_vfield(filename + '.png')
        self.plot_true_vfield(filename + '.pdf')
        filename = '%s_obs_maps' % name
        self.plot_obs_vfield(filename + '.png')
        self.plot_obs_vfield(filename + '.pdf')

        self.logger.info("Saved files in %s" % os.getcwd())
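    # Post-run sketch (added) : a typical save-then-inspect workflow.
    # 'my_run' is a placeholder prefix ; the chain file it produces can also
    # be fed back to import_chain() in a later session, as shown earlier.
    #
    #   gk.save('my_run', clobber=True)
    #   chain = asciitable.read('my_run_chain.dat',
    #                           Reader=asciitable.FixedWidth)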
    # fixme: to do
    # def read_files(self, name):
    #     """
    #     Reads back the results of a previous MCMC run from files :
    #
    #     - <name>_galaxy_parameters.txt
    #         A plain text representation of the parameters of the galaxy.
    #     - <name>_galaxy_parameters.dat
    #         An asciitable representation of the parameters of the galaxy.
    #     - <name>_chain.dat
    #         An asciitable representation of the Markov Chain.
    #         Each line holds one set of galaxy parameters and its associated
    #         reduced chi.
    #     - <name>_run_param.txt
    #         A plain text representation of the run parameters.
    #     - <name>_instrument.txt
    #         A plain text representation of the instrument parameters.
    #     - <name>_convolved_cube.fits
    #         A FITS file containing the PSF-convolved result cube.
    #     - <name>_deconvolved_cube.fits
    #         A FITS file containing the pre-convolution clean cube.
    #     - <name>_residuals_cube.fits
    #         A FITS file containing the diff between input data and
    #         simulation.
    #     - <name>_flux_map.fits
    #         A FITS file containing the true flux map [intrinsic]
    #     - <name>_vel_map.fits
    #         A FITS file containing the true velocity map [intrinsic]
    #     - <name>_sig_map.fits
    #         A FITS file containing the true dispersion map [intrinsic]
    #     - <name>_images.png
    #         A PNG image generated by the ``plot_images`` method.
    #         Note: the clobber option is always true for this file.
    #     - <name>_mcmc.png
    #         A PNG image generated by the ``plot_mcmc`` method.
    #         Note: the clobber option is always true for this file.
    #     - <name>_correlations.png
    #         A PNG image generated by the ``plot_correlations`` method.
    #         Note: the clobber option is always true for this file.
    #
    #     The .dat files can be easily read using asciitable and its
    #     ``asciitable.FixedWidth`` Reader : ::
    #
    #         asciitable.read('example.chain.dat',
    #                         Reader=asciitable.FixedWidth)
    #
    #     .. warning::
    #         The generated files are not compressed and may take up a lot
    #         of disk space.
    #
    #     name: string
    #         An absolute or relative name that will be used as prefix for
    #         the save files.
    #         Eg: 'my_run', or '/home/me/science/my_run'.
    #     clobber: bool
    #         When set to true, will OVERWRITE existing files.
# """ # filename = '%s_run_param.txt' % name # dump = self._read_file(filename) # filename = '%s_instrument.txt' % name # dump =self._read_file(filename) # #filename = '%s_galaxy_parameters.txt' % name # #self._read_file(filename) # #filename = '%s_galaxy_parameters.dat' % name # #self._read_file(filename, self.galaxy.structured_info(), clobber) # filename = '%s_chain.dat' % name # self._chain_from_asciitable(filename) # self.best_parameters_from_chain(method_chain=method_chain,last_fraction=last_fraction,percentile=self.percentile) # filename = '%s_convolved_cube.fits' % name # self.convolved_cube.write_to(filename, clobber) # filename = '%s_deconvolved_cube.fits' % name # self.deconvolved_cube.write_to(filename, clobber) # filename = '%s_residuals_cube.fits' % name # self.residuals_cube.write_to(filename, clobber) # filename = '%s_3Dkernel.fits' % name # self.psf3d.write_to(filename, clobber) # filename = '%s_flux_map.fits' % name # self.true_flux_map.write_to(filename, clobber) # filename = '%s_vel_map.fits' % name # self.true_velocity_map.write_to(filename, clobber) # filename = '%s_disp_map.fits' % name # self.true_disp_map.write_to(filename, clobber) # filename = '%s_images' % name # self.plot_images(filename + '.png') # self.plot_images(filename + '.pdf') # filename = '%s_mcmc' % name # self.plot_mcmc(filename + '.png') # self.plot_mcmc(filename + '.pdf') # filename = '%s_correlations' % name # self.plot_correlations(filename + '.png') # self.plot_correlations(filename + '.pdf') # filename = '%s_true_maps' % name # self.plot_true_vfield(filename + '.png') # self.plot_true_vfield(filename + '.pdf') # self.logger.info("Saved files in %s" % os.getcwd()) def __str__(self): """ Return information about this run in a multiline string. """ return """ instrument = %s iterations = %s parameters method = %s, chain_fraction: %s, CI percentile: %s, Disk model: %s min_boundaries = %s max_boundaries = %s known_parameters = %s initial_parameters = %s random_scale = %s final_parameters = \n %s acceptance_rate = %s """ % ( self.instrument, self.max_iterations, self.method, self.chain_fraction, self.percentile, self.disk_model, self.min_boundaries, self.max_boundaries, self.known_parameters, self.initial_parameters, self.random_scale, self.galaxy.structured_info(), self.acceptance_rate ) def _chain_as_asciitable(self): """ Exports the chain as an `asciitable`. See the public API `import_chain()` for the reverse operation of loading the chain from an `asciitable` file. """ out = StringStdOut() asciitable.write(self.chain.tolist(), output=out, Writer=asciitable.FixedWidth, names=self.chain.dtype.names) return out.content def _save_to_file(self, filename, contents, clobber): if not clobber and os.path.isfile(filename): raise IOError("The file '%s' already exists. Specify clobber=True to overwrite it.") with open(filename, 'w') as f: f.write(contents) def _read_file(self, filename): with open(filename, 'r') as f: contents=f.read() return contents def _get_min_chi_index(self): """ Gets the index in the chain of the parameters with the minimal chi. """ if self.chain is None: raise RuntimeError("No chain! 
    def _compute_mass(self, radius, rv, max_vel):  #, flux_profile, rotation_curve):
        """
        From radius, rv and max_vel, computes the velocity profile and the
        dynamical mass.

        radius: float
            In pixels, the half-light radius.
        """
        try:
            from astropy.cosmology import WMAP5
        except ImportError:
            raise ImportError('The package `astropy` is required.')

        if self.redshift is None:
            raise RuntimeError("Cannot compute mass if redshift is not provided.")

        redshift = self.redshift
        pixscale = self.cube.xy_step

        # Compute angular distance in Mpc
        dang = WMAP5.angular_diameter_distance(redshift).value
        # Convert it to kpc/arcsec
        dang *= 1e3 * np.radians(1. / 3600)

        rhalf_kpc = radius * pixscale * dang  # in kpc
        r_kpc = np.r_[0:200:100j]             # in kpc
        r = r_kpc / dang                      # in arcsec
        r_pix = r / pixscale                  # in pixel

        if self.disk_model['rotation_curve'] == 'arctan':
            fv = lambda q: np.arctan(q / rv) * max_vel * 2. / np.pi  # r in pix
        elif self.disk_model['rotation_curve'] == 'mass':
            s = rhalf_kpc / dang / pixscale
            fv = lambda q: self._fv_newton(q, s, max_vel)  # r in pix
        elif self.disk_model['rotation_curve'] == 'exponential':
            # Feng's inv_exp profile
            fv = lambda q: max_vel * (1.0 - np.exp(-q / rv))
        elif self.disk_model['rotation_curve'] == 'tanh':
            fv = lambda q: max_vel * np.tanh(q / rv)
        elif self.disk_model['rotation_curve'] == 'isothermal':
            # q may be an array ; force v=0 at q=0
            fv = lambda q: np.where(q == 0, 0.,
                                    max_vel * np.sqrt(1. - rv / q * np.arctan(q / rv)))
        else:
            raise ValueError("Unsupported Rotational Curve '%s'"
                             % self.disk_model['rotation_curve'])

        # v^2 = (GM/r), with v in km/s and r in kpc
        compute_mass = lambda vel, rad: \
            (1e3 * vel) ** 2 * (PARSEC * rad * 1e3) / G / SOL_MASS

        v_profile = fv(r_pix)
        v_rhalf = interpolate.spline(r_kpc, v_profile, rhalf_kpc)
        dynamic_mass = compute_mass(v_rhalf, rhalf_kpc)

        #mass_profile = compute_mass(v_profile, r_kpc)
        #v_2rhalf = interpolate.spline(r_kpc, v_profile, 2. * rhalf_kpc)
        #rvir_kpc = 180  # compute_Rvir(Vmax)
        #Vvir = interpolate.spline(r_kpc, v_profile, rvir_kpc)
        #Mvir = 0.83 * (max_vel / 200.) ** 3 * (1 + context['redshift']) ** 1.5 * 1e12

        return dynamic_mass

    def _create_maps(self, galaxy, shape):
        flux_cube, vz, vz_map, s_map, sig_map, sigz_disk_map, sig_intr = \
            self._compute_galaxy_model(galaxy, shape)
        # Normalize to real flux
        flux_map = flux_cube.sum(0)
        flux_map = galaxy.flux * flux_map / flux_map.sum()
        return flux_map, vz_map, s_map
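    # Illustrative sketch (commented out): the dynamical mass formula used
    # above, M = v^2 r / G, with v in km/s and r in kpc, using the module
    # constants PARSEC, G and SOL_MASS. E.g. v = 200 km/s at r = 10 kpc:
    #
    #   M = (1e3 * 200.) ** 2 * (PARSEC * 10. * 1e3) / G / SOL_MASS
    #   # ~ 9e10 solar masses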
    def _create_cube(self, shape, f_map, v_map, s_map, z_step_kms, zo=None):
        """
        Creates a 3D cube from Flux, Velocity and Dispersion maps.

        shape: tuple
            Defines the created cube's shape.
        f_map: ndarray
            2D image of the flux.
        v_map: ndarray
            2D image of the velocity.
        s_map: ndarray
            2D image of the dispersion.
        z_step_kms: float
            Spectral step, in km/s.
        zo: int
            The centroid (in pixels) in the z-direction.
            If not set, will be the middle of the z-axis.
        """
        vmax = z_step_kms * (shape[0] - 1) / 2.
        if zo is None:
            vzero = vmax
        else:
            vzero = z_step_kms * zo  # fixme: shouldn't this be with zo-0.5 ?

        # Array of velocities : indices from 0 to 2*Vmax, offset by vzero
        velocities = np.linspace(0, 2 * vmax, shape[0]) - vzero

        # Create a copy of Vmap in each z plane
        tmpcube = np.outer(np.ones_like(velocities), v_map)
        vcube = tmpcube.reshape(shape)

        # Create a copy of Smap in each z plane
        tmpcube = np.outer(np.ones_like(velocities), s_map)
        scube = tmpcube.reshape(shape)

        # Create a copy of velocities in each spatial location
        if z_step_kms > 0:
            vbegin = np.min(velocities)
            vfinal = np.max(velocities)
        else:
            vbegin = np.max(velocities)
            vfinal = np.min(velocities)

        # Create grid of velocities
        vgrid, y, x = np.mgrid[
            vbegin: vfinal + 0.9 * z_step_kms: z_step_kms,
            0: np.shape(v_map)[0],
            0: np.shape(v_map)[1]
        ]

        # Apply Gaussian
        finalcube = np.exp(-0.5 * (vgrid - vcube) ** 2 / (scube ** 2))

        # For doublets, add the blue component.
        # deltaV is defined as (l2-l1)/l2 * c, i.e. from the reddest line.
        if self.line is not None:
            # Delta between emission peaks (in km/s)
            # line['wave'] = [l, lref], eg. [3726.2, 3728.9]  # Observed or Rest
            # line['ratio'] = [0.8, 1.0] ; the primary line for redshifts is the reddest
            delta = 3e5 * (self.line['wave'][1] - self.line['wave'][0]) \
                / (self.line['wave'][0] + self.line['wave'][1]) * 2
            ratio = self.line['ratio'][0] / self.line['ratio'][1]
            # todo: can we skip the following if ratio == 1. and delta == 0. ?
            # Add the blue doublet
            finalcube = finalcube \
                + ratio * np.exp(-0.5 * (vgrid - (vcube - delta)) ** 2 / (scube ** 2))
            # delta = 217.   # km/s OII
            # delta = -945   # km/s Ha 6564.614 NII 6585.27
            # delta = 2872   # km/s OIII 4960.295 5008.240
            # delta = 641    # km/s SII 6718.29 6732.67

        # Normalize from amplitude ratios : int f dv = int f dz
        ampsum = bn.nansum(finalcube, 0)
        #amplitude = f_map / bn.nansum(finalcube, 0)
        #amplitude[np.isnan(amplitude)] = 0
        amplitude = f_map / ampsum
        amplitude[~np.isfinite(amplitude)] = 0
        #amplitude = np.where(np.isfinite(amplitude), f_map / ampsum, 0)

        # Create a cube of Amplitude in each z plane
        amplitude_cube = np.resize(amplitude, shape)
        finalcube = finalcube * amplitude_cube
        finalcube[~np.isfinite(finalcube)] = 0

        cube = Cube(data=finalcube)
        cube.xy_step = self.cube.xy_step
        cube.z_step = self.cube.z_step
        cube.z_central = self.cube.z_central

        return cube
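    # Illustrative sketch (commented out): the doublet velocity offset used
    # above, c * (l2 - l1) / ((l1 + l2) / 2), for the [OII] doublet quoted
    # in the comments:
    #
    #   l1, l2 = 3726.2, 3728.9                     # Angstroms
    #   delta = 3e5 * (l2 - l1) / ((l1 + l2) / 2.)  # ~217 km/s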
    def _compute_galaxy_model(self, galaxy, shape):
        """
        Creates a disk model in a cube.

        galaxy: GalaxyParameters
            The parameters of the galaxy.

        Returns 3D flux cube, 3D Vz cube, 2D Vmap (1st moment of Vz),
        2D S_map (2nd moment of Vz), 2D disk dispersion, 2D intrinsic dispersion.
        """
        # Return indices in source frame ; vz in image frame
        hz = galaxy.radius * 0.15

        # Trick to go faster, as we know the max z shape the resulting model will fit in:
        # z_shape = np.ceil(shape[1] / 2.5 * 0.15 * 6.)  # 6 sigmas
        # z_shape = np.ceil(self.max_boundaries.radius / 2.5 * 0.15 * 6.)  # 6 sigmas
        # or
        # z_shape = np.ceil(self.max_boundaries.radius * 2 * 3 * 0.15)  # 3 hz_max on each side
        # This shortcut introduced a bug in the flux_deconvolved maps! Fixed after 1.6.0.
        z_shape = int(np.ceil((shape[1] + shape[2]) / 2.))  # mean of spatial dimensions
        self.logger.debug(" using z_shape %d in compute_galaxy_model" % z_shape)
        self.logger.debug(" cube is %d in z- " % shape[0])
        # Minimum shape for z direction -- collapsed, so big sizes unimportant:
        # z_shape = 6 hz(sigma)_max ; hz_max = size_box/2.5 (proxy for radius max) * 0.15

        # Create arrays of indices in image frame; this cube has spatial
        # dimensions in x, y, z, so it should be almost a perfect cube in shape.
        ind_shape = tuple([z_shape, shape[1], shape[2]])
        z, y, x = np.indices(ind_shape)  # adding 0.5 to be centered on pixel center
        nx, ny, nz, vz_cube, vtot = self._map_indices(x, y, z, hz, galaxy)

        # Compute radius cube and 'flux_cube'
        # [this is a cube of repeated 2D flux maps, i.e. a cylinder]
        if self.disk_model['flux_profile'] == 'gaussian':
            radius_cube, flux_cube = self.flux_gaussian(nx, ny, nz, galaxy.radius)
        elif self.disk_model['flux_profile'] == 'exponential':
            radius_cube, flux_cube = self.flux_exponential(nx, ny, nz, galaxy.radius)
        elif self.disk_model['flux_profile'] == 'de_vaucouleurs':
            radius_cube, flux_cube = self.flux_de_vaucouleurs(nx, ny, nz, galaxy.radius)
        else:
            raise ValueError("Context's profile '%s' not supported"
                             % self.disk_model['flux_profile'])

        # Add disk thickness
        if self.disk_model['hz_profile'] == 'gaussian':
            flux_cube *= np.exp(-nz ** 2 / 2. / hz ** 2)
        elif self.disk_model['hz_profile'] == 'exponential':
            flux_cube *= np.exp(-np.sqrt(nz ** 2) / hz)  # exponential z-profile
        elif self.disk_model['hz_profile'] == 'sech2':
            flux_cube *= np.cosh(-nz / hz) ** (-2.)  # sech^2 z-profile
        elif self.disk_model['hz_profile'] == 'none':
            self.logger.warning(' Using no disk thickness profile; '
                                'this galaxy will become a cylinder!!! ')
        else:
            raise NotImplementedError("Context profile '%s' not supported"
                                      % self.disk_model['hz_profile'])

        # Flux-weighted moments
        flux_map = bn.nansum(flux_cube, 0)
        vz_map = bn.nansum(vz_cube * flux_cube / flux_map, 0)
        v2_map = bn.nansum(vz_cube ** 2 * flux_cube / flux_map, 0)
        Var_map = v2_map - vz_map ** 2
        sig_map_disk = np.sqrt(Var_map)

        # Prevent future division by zero, by adding an epsilon.
        # Zero will create NaNs, which are thus ignored; we could improve
        # this by treating the derivative at r=0 ?!
        epsilon = 0

        ### Add Sigma_Aleatoire_disk
        ### ISOTROPIC, but this IS the 1-d dispersion in z-direction
        if self.disk_model['disk_dispersion'] == 'thin':
            # A) using R/h = 0.5 (V/sigma)^2 for very thin disk
            sig_ale_disk = np.sqrt(hz * (vtot + epsilon) ** 2
                                   / (radius_cube + epsilon))  # dimension of r [3D-spatial]
        elif self.disk_model['disk_dispersion'] == 'thick':
            # B) using R/h = V/Sigma for compact disk
            sig_ale_disk = hz * (vtot + epsilon) / (radius_cube + epsilon)  # dimension of r [3D-spatial]
        elif self.disk_model['disk_dispersion'] == 'infinitely_thin':
            # C) using Binney & Merrifield
            raise NotImplementedError("TODO : compute surf_density")
            # sig_ale_disk = np.sqrt(math.tau * Surf_density * G * hz_kpc)  # dimension of r [3D-spatial]
        else:
            raise ValueError("Disk dispersion '%s' not supported"
                             % self.disk_model['disk_dispersion'])

        # Compute weighted mean for Sig_disk:
        # a) 2D map = flux-weighted map
        #Sigz_disk_map = bn.nansum(Sig_disk * A, 0) / Flux_map
        # b) 2D map = flux-weighted map rms
        #sigz_map_ale = np.sqrt(bn.nansum(sig_ale_disk ** 2 * A, 0) / Flux_map)
        # c) Normalized with flux squared
        sigz_map_ale = np.sqrt(bn.nansum(sig_ale_disk ** 2 * flux_cube ** 2, 0)
                               / bn.nansum(flux_cube * flux_cube, 0))

        # Intrinsic Dispersion
        sig_intr = np.ones_like(sigz_map_ale) * galaxy.sigma0

        # Total S-map
        s_map = np.sqrt(sig_map_disk ** 2 + sigz_map_ale ** 2 + sig_intr ** 2)

        # Normalize by flux
        total = bn.nansum(flux_cube)
        flux_cube = np.where(total > 0, flux_cube / total, 0)

        return flux_cube, vz_cube, vz_map, s_map, sig_map_disk, sigz_map_ale, sig_intr
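    # Illustrative sketch (commented out): the three disk thickness profiles
    # applied above, evaluated on a 1D z-axis (standalone numpy; hz in pixels
    # is an assumed value):
    #
    #   import numpy as np
    #   nz, hz = np.linspace(-5., 5., 101), 1.0
    #   gauss = np.exp(-nz ** 2 / 2. / hz ** 2)
    #   expo = np.exp(-np.abs(nz) / hz)
    #   sech2 = np.cosh(nz / hz) ** (-2.)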
    def _compute_error_Mighell(self, galaxy, flux_profile, rotation_curve, disk_dispersion):
        """
        Modified Mighell statistic (Mighell 1998) :
        Sum ( D + min(D,1) - M )^2 / ( D + 1 )
        """
        # Compute convolved cube for given galaxy parameters
        cube_convolved = self.create_convolved_cube(galaxy, self.cube.shape)
        tmp_cube = Cube(np.where(self.cube.data >= 1, 1., self.cube.data))
        difference = (self.cube + tmp_cube - cube_convolved) ** 2 \
            / (self.cube + 1.0)
        return np.ndarray.flatten(difference.data)

    def _compute_error_Neyman(self, galaxy, flux_profile, rotation_curve, disk_dispersion):
        """
        Modified Neyman statistic
        Humphrey 2009, http://adsabs.harvard.edu/abs/2009ApJ...693..822H
        Sum ( M - D )^2 / max(D,1)
        """
        # Compute convolved cube for given galaxy parameters
        cube_convolved = self.create_convolved_cube(galaxy, self.cube.shape)
        tmp_cube = Cube(np.where(self.cube.data <= 1, 1., self.cube.data))
        difference = (cube_convolved - self.cube) ** 2 / tmp_cube
        return np.ndarray.flatten(difference.data)

    def _compute_error_Pearson(self, galaxy, flux_profile, rotation_curve, disk_dispersion):
        """
        Pearson statistic
        Humphrey 2009, http://adsabs.harvard.edu/abs/2009ApJ...693..822H
        Sum ( M - D )^2 / M
        """
        # Compute convolved cube for given galaxy parameters
        cube_convolved = self.create_convolved_cube(galaxy, self.cube.shape)
        difference = (cube_convolved - self.cube) ** 2 / cube_convolved
        return np.ndarray.flatten(difference.data)

    def _compute_error_cstat(self, galaxy, flux_profile, rotation_curve, disk_dispersion):
        """
        Cash statistic for Poisson noise
        Humphrey 2009, http://adsabs.harvard.edu/abs/2009ApJ...693..822H
        Sum ( M - D + D * log(D/M) )
        """
        # Compute convolved cube for given galaxy parameters
        cube_convolved = self.create_convolved_cube(galaxy, self.cube.shape)
        tmp_cube = np.log(self.cube.data / cube_convolved)
        difference = (cube_convolved - self.cube) + self.cube * tmp_cube
        return np.ndarray.flatten(difference.data)

    def _compute_error_gauss(self, galaxy):
        """
        Computes the difference between the measured cube and the cube
        simulated from the given galaxy parameters.
        Returns ( D - M ) / stdev
        """
        # Compute convolved cube for given galaxy parameters
        cube_convolved = self.create_convolved_cube(galaxy, self.cube.shape)
        # Difference between measured and convolved cubes, scaled by the error
        difference = (self.cube - cube_convolved) / self.error_cube
        return np.ndarray.flatten(difference.data)
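    # Illustrative sketch (commented out): the four Poisson fit statistics
    # above, written out on toy data D and model M (plain numpy arrays, not
    # Cubes, so the Cube arithmetic is skipped):
    #
    #   import numpy as np
    #   D = np.array([0.5, 2., 5.])
    #   M = np.array([1., 2., 4.])
    #   mighell = (D + np.minimum(D, 1.) - M) ** 2 / (D + 1.)
    #   neyman = (M - D) ** 2 / np.maximum(D, 1.)
    #   pearson = (M - D) ** 2 / M
    #   cstat = (M - D) + D * np.log(D / M)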
    def _map_indices(self, xx, yy, zz, hz, galaxy):
        """
        Takes arrays of indices and galaxy parameters, and rotates accordingly.
        Returns the xx, yy, zz indices of the object in the image frame,
        and the z-component of the velocity of the object in the image frame.
        """
        inclination = np.radians(galaxy.inclination)
        pa = np.radians(galaxy.pa - 90)
        zo = (np.size(zz, 0) - 1) / 2.  # center along z

        # Rotation of inclination (around x)
        rot_x = lambda i: np.array([[1, 0, 0],
                                    [0, np.cos(i), -np.sin(i)],
                                    [0, np.sin(i), np.cos(i)]])
        rot_i = rot_x(inclination)
        rot_mi = rot_x(-inclination)

        # Rotation of PA (around z), anti-clockwise
        rot_z = lambda a: np.array([[np.cos(a), np.sin(a), 0],
                                    [-np.sin(a), np.cos(a), 0],
                                    [0, 0, 1]])
        rot_pa = rot_z(pa)
        rot_mpa = rot_z(-pa)

        # Transformation 1 : from sky coordinates to disk plane
        xx = xx - galaxy.x
        yy = yy - galaxy.y
        zz = zz - zo

        # Rotation around z for PA
        nx = rot_pa[0, 0] * xx + rot_pa[0, 1] * yy
        ny = rot_pa[1, 0] * xx + rot_pa[1, 1] * yy
        nz = rot_pa[2, 2] * zz

        # Rotation around x for inclination
        x = rot_i[0, 0] * nx
        y = rot_i[1, 1] * ny + rot_i[1, 2] * nz
        z = rot_i[2, 1] * ny + rot_i[2, 2] * nz

        # Compute the radius cube
        if self.disk_model['flux_profile'] == 'de_vaucouleurs':
            radius_cube, _ = self.flux_de_vaucouleurs(x, y, z, galaxy.radius)
        elif self.disk_model['flux_profile'] == 'gaussian':
            radius_cube, _ = self.flux_gaussian(x, y, z, galaxy.radius)
        elif self.disk_model['flux_profile'] == 'exponential':
            radius_cube, _ = self.flux_exponential(x, y, z, galaxy.radius)
        else:
            raise NotImplementedError("Flux profile '%s' not supported"
                                      % self.disk_model['flux_profile'])

        # Circular orbits -- we're DIVIDING BY ZERO ; it's okay, need for speed
        vx = np.where(np.isfinite(y / radius_cube), +y / radius_cube, 0)
        vy = np.where(np.isfinite(x / radius_cube), -x / radius_cube, 0)

        if self.disk_model['rotation_curve'] == 'arctan':
            # For flat rotation curve
            v_profile = np.arctan(radius_cube / galaxy.rv) * galaxy.max_vel * 2. / np.pi
        elif self.disk_model['rotation_curve'] == 'mass':
            # For Mass=Light profile
            v_profile = self._fv_newton(radius_cube, galaxy.radius, galaxy.max_vel)
        elif self.disk_model['rotation_curve'] == 'exponential':
            # For Feng's inv_exp profile
            v_profile = galaxy.max_vel * (1.0 - np.exp(-radius_cube / galaxy.rv))
        elif self.disk_model['rotation_curve'] == 'tanh':
            v_profile = galaxy.max_vel * np.tanh(radius_cube / galaxy.rv)
        elif self.disk_model['rotation_curve'] == 'isothermal':
            v_profile = galaxy.max_vel * np.sqrt(1 - galaxy.rv / radius_cube
                                                 * np.arctan(radius_cube / galaxy.rv))
        else:
            raise NotImplementedError("Rotational Curve '%s' not supported"
                                      % self.disk_model['rotation_curve'])

        # Remove NaNs where radius is 0
        v_profile[radius_cube == 0] = 0.

        # 3D velocity vectors
        vx = vx * v_profile
        vy = vy * v_profile

        # Rotation of -inclination around x
        vvx = rot_mi[0, 0] * vx
        vvy = rot_mi[1, 1] * vy
        vvz = rot_mi[2, 1] * vy

        # Rotation of -pa around z
        vx = rot_mpa[0, 0] * vvx + rot_mpa[0, 1] * vvy
        vy = rot_mpa[1, 0] * vvx + rot_mpa[1, 1] * vvy
        vz = rot_mpa[2, 2] * vvz

        v = np.sqrt(vx ** 2 + vy ** 2 + vz ** 2)

        return x, y, z, vz, v
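    # Illustrative sketch (commented out): the composition of the two
    # rotations used above -- position angle about z, then inclination
    # about x -- applied to a test vector (the angle values are assumed):
    #
    #   import numpy as np
    #   i, pa = np.radians(60.), np.radians(30. - 90.)
    #   rot_x = lambda a: np.array([[1, 0, 0],
    #                               [0, np.cos(a), -np.sin(a)],
    #                               [0, np.sin(a), np.cos(a)]])
    #   rot_z = lambda a: np.array([[np.cos(a), np.sin(a), 0],
    #                               [-np.sin(a), np.cos(a), 0],
    #                               [0, 0, 1]])
    #   xyz_disk = rot_x(i).dot(rot_z(pa)).dot([1., 0., 0.])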
    def _fv_newton(self, r, radius, vmax):
        """
        Returns the velocity given the r-cube.
        The cumulative flux profile I(<r) is computed analytically for the
        given profile shape.
        """
        mass = 1  # enclosed mass, normalized below by Vmax

        if self.disk_model['flux_profile'] == 'de_vaucouleurs':
            # (-4 (5040 + 5040 x^(1/4) + 2520 Sqrt[x] + 840 x^(3/4) + 210 x
            #  + 42 x^(5/4) + 7 x^(3/2) + x^(7/4))) / E^x^(1/4)
            int_x_dev = lambda x: (-4. * (5040. + 5040. * x ** (1. / 4)
                                          + 2520. * np.sqrt(x)
                                          + 840. * x ** (3. / 4)
                                          + 210. * x
                                          + 42. * x ** (5. / 4)
                                          + 7. * x ** (3. / 2)
                                          + x ** (7. / 4))) * np.exp(-x ** (1. / 4))
            mtot = mass * (int_x_dev(r / radius) - int_x_dev(0))
        elif self.disk_model['flux_profile'] == 'gaussian':
            mtot = mass * (1. - np.exp(-r ** 2 / 2. / (2 * radius / 2.35) ** 2))
        elif self.disk_model['flux_profile'] == 'exponential':
            mtot = mass * (1. - (r / radius) * np.exp(-r / radius) - np.exp(-r / radius))
        else:
            raise NotImplementedError("Flux profile '%s' not supported"
                                      % self.disk_model['flux_profile'])

        v = np.sqrt(mtot / (r + 1e-9))  # km/s

        # Normalization set by Vmax (sets the mass)
        if np.size(v) > 1:
            v = v / bn.nanmax(v) * vmax
        else:
            v = v / np.max(v) * vmax

        return v

    def _set_verbose(self, verbose):
        """
        Updates the logger's status.
        """
        if verbose is True:
            self.logger.disabled = False
            np.seterr(all='warn')
        else:
            self.logger.disabled = True
            np.seterr(all='ignore')

    def _plot2dimage(self, image, vmin=-250., vmax=250., pos=None,
                     xlabel=None, ylabel=None, contour=None, extent=None,
                     title=None, interpolation=None):
        """
        Plots a 2D image with colorbars.
        """
        import matplotlib as mpl
        from mpl_toolkits.axes_grid import inset_locator as inl
        # from matplotlib.ticker import MaxNLocator

        # Matplotlib's default origin is not ours, we need to use 'lower'
        origin = 'lower'

        if extent is not None:
            plot.imshow(image, aspect='equal', vmin=vmin, vmax=vmax,
                        extent=extent, origin=origin, interpolation=interpolation)
            plot.axhline(0, color='k', lw=1, alpha=0.5)
            plot.axvline(0, color='k', lw=1, alpha=0.5)
            if contour is not None:
                cmax = bn.nanmax(contour)
                plot.contour(contour, extent=extent, colors='k',
                             levels=[cmax / 10., cmax / 5., cmax / 2.],
                             origin=origin)
        else:
            plot.imshow(image, aspect='equal', vmin=vmin, vmax=vmax, origin=origin)
            plot.axhline(0, color='k', lw=1, alpha=0.5)
            plot.axvline(0, color='k', lw=1, alpha=0.5)
            if contour is not None:
                cmax = bn.nanmax(contour)
                plot.contour(contour, levels=[cmax / 10., cmax / 5., cmax / 2.],
                             colors='k', origin=origin)

        if title is not None:
            plot.title(title)

        ax = plot.gca()
        v = ax.axis()
        if pos is not None:
            plot.plot(pos[0], pos[1], 'k+', ms=25, mew=3)
        plot.axis(v)

        if xlabel is not None:
            plot.xlabel(xlabel)
        if ylabel is not None:
            plot.ylabel(ylabel)
        #ax = p.gca()

        # Add colorbar
        cax = inl.inset_axes(ax,
                             width="80%",   # width: 80% of parent bbox width
                             height="10%",
                             #borderpad=1,
                             #bbox_to_anchor=(1, 0, 1, 2),
                             #bbox_transform=ax.transAxes,
                             loc=1)
        norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)
        if vmax > 1:
            tick_range = np.rint(np.r_[vmin:vmax:5j])
        else:
            tick_range = np.r_[vmin:vmax:5j]
        # matplotlib will crash and burn if ticks are not unique
        tick_range = np.unique(tick_range)
        cb = mpl.colorbar.ColorbarBase(cax, cmap=plot.cm.jet, norm=norm,
                                       orientation='horizontal',
                                       ticks=tick_range[1:-1])
        #cax.xaxis.set_major_locator(MaxNLocator(nbins=6))

        return ax
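    # Illustrative sketch (commented out): the exponential enclosed-light
    # profile used in _fv_newton() above, M(<x) = 1 - e^-x - x e^-x with
    # x = r / radius ; it rises from 0 at x = 0 towards 1 at large x:
    #
    #   import numpy as np
    #   x = np.array([0., 1., 5.])
    #   mtot = 1. - x * np.exp(-x) - np.exp(-x)  # -> [0., 0.264, 0.960]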
    def _slit(self, mapdata, slit_width, reshape=False, ax=None):
        """
        Creates a 1D profile along the major axis.
        Note that PA = 0 is vertical, anti-clockwise from the y-axis.
        """
        # Original code :
        #ang = np.radians(PA)
        #prof = np.where(s <= (slit_width / 2.0 + 0.5), data, 0.)
        #profile = ndimage.rotate(prof, PA, mode='constant', order=1).sum(1)
        #profile = profile / np.max(profile)

        y, x = np.indices(mapdata.shape)
        ang = -np.radians(self.galaxy.PA - 90.)
        dx = x - self.galaxy.x
        dy = y - self.galaxy.y
        dx_p = dx * np.cos(ang) - dy * np.sin(ang)
        dy_p = dx * np.sin(ang) + dy * np.cos(ang)

        if ax is not None:
            # Plot the slit, with axis labels in arcsec
            xlim = ax.get_xlim()
            ylim = ax.get_ylim()
            pixscale = self.instrument.xy_step
            xar = (x[0] - self.galaxy.x) * pixscale
            ang = np.radians(self.galaxy.PA + 90.)
            xx = xar * np.cos(ang) - (slit_width / 2.) * pixscale * np.sin(ang)
            yy = xar * np.sin(ang) + (slit_width / 2.) * pixscale * np.cos(ang)
            ax.plot(xx, yy, '-', lw=2, c='grey', alpha=0.6)
            xx = xar * np.cos(ang) + (slit_width / 2.) * pixscale * np.sin(ang)
            yy = xar * np.sin(ang) - (slit_width / 2.) * pixscale * np.cos(ang)
            ax.plot(xx, yy, '-', lw=2, c='grey', alpha=0.6)
            ax.set_xlim(xlim)
            ax.set_ylim(ylim)

        slit_vert = np.abs(dy_p)  # used for PA=0
        slit_horiz = dx_p         # distance along the slit
        slit_data = np.where(slit_vert <= (slit_width / 2.0), mapdata, 0.)
        slit_distance = np.where(slit_vert <= (slit_width / 2.0), slit_horiz, 0.)

        # PA is anti-clockwise, rotate is clockwise
        rot_ang = self.galaxy.PA + 90  # to have a horizontal slit image
        # order=0 because spline interpolation introduces errors!
        slit_rot = ndimage.rotate(slit_data, rot_ang, mode='constant',
                                  order=0, reshape=reshape)
        dist_rot = ndimage.rotate(slit_distance, rot_ang, mode='constant',
                                  order=0, reshape=reshape)

        # nansum returns a bool with bottleneck, not with numpy!!
        a = np.sum(slit_rot != 0, axis=0)
        profile = np.where(a != 0, bn.nansum(slit_rot, axis=0) / a, 0.)  # mean of the slit
        b = np.sum(dist_rot != 0, axis=0)  # pixels not zero and not nan
        xaxis = np.where(b != 0, bn.nansum(dist_rot, axis=0) / b, 0.)

        return xaxis, profile
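    # Illustrative sketch (commented out): the slit-averaging scheme used
    # above ; ndimage.rotate with order=0 keeps zeros exact, so a column
    # mean over the non-zero pixels recovers the along-slit profile:
    #
    #   import numpy as np
    #   slit_rot = np.array([[0., 1., 2.],
    #                        [0., 3., 2.]])
    #   n = np.sum(slit_rot != 0, axis=0)
    #   profile = np.where(n != 0, np.nansum(slit_rot, axis=0) / n, 0.)
    #   # -> [0., 2., 2.]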
    def _make_moment_maps(self, cube=None, mask=False, cut_level=50,
                          parameters=None, instrument=None,
                          remove_LSF_from_disp=False):
        """
        Makes moment maps from a noiseless cube.
        Assumes there is no continuum to be removed.

        :param cube: HyperspectralCube
        :param mask: boolean [default: False], to apply masking
        :return: flux, velocity and dispersion maps
        """
        if cube is None:
            cube = self.convolved_cube

        if instrument is None:
            vstep = self.instrument.z_step_kms
            lsf_vector = self.instrument.lsf.as_vector(cube)
            if remove_LSF_from_disp:
                lsf_fwhm = self.instrument.lsf.fwhm / self.instrument.z_step  # in pixels!
        else:
            vstep = instrument.z_step_kms
            lsf_vector = instrument.lsf.as_vector(cube)
            if remove_LSF_from_disp:
                x = np.arange(np.size(lsf_vector))
                sigma = np.sqrt(np.sum(x ** 2 * lsf_vector)
                                - np.sum(x * lsf_vector) ** 2)  # in pixels!
                lsf_fwhm = sigma * 2.35

        zgrid, _, _ = np.indices(cube.shape)

        if parameters is None:
            zo = self.galaxy.z
        else:
            zo = parameters.z

        if mask is True:
            # Default mask is flux > max(flux) / cut_level
            mask = (cube.data > bn.nanmax(cube.data) / cut_level)
        else:
            mask = np.ones_like(cube.data)

        if self.line is not None:
            # If the line is a doublet, use a weighted average
            delta = 3e5 * (self.line['wave'][1] - self.line['wave'][0]) \
                / (self.line['wave'][0] + self.line['wave'][1]) * 2
            r1 = self.line['ratio'][0]
            r2 = self.line['ratio'][1]
            p = r1 / (r1 + r2)  # weight of the first/blue line
            # Weighted averaged zo :
            #zo_avg = ((zo - delta / vstep) * r1 + zo * r2) / (r1 + r2)
            #zo = zo_avg  # in km/s
            # M1 = p * mu1 + (1-p) * mu2 ; mu1 = mu2 - Delta
            # => mu2 = M1 + p * delta
            Fmap = bn.nansum(cube.data * mask, axis=0)
            cube_norm = np.where(np.isfinite(cube.data / Fmap), cube.data / Fmap, 0)
            M1map = bn.nansum(cube_norm * mask * (zgrid - zo), axis=0) * vstep
            Vmap = M1map + p * delta  # Vmap = mu2

            # M2 = p * (mu1^2 + sig^2) + (1-p) * (mu2^2 + sig^2) ; mu1 = mu2 - Delta
            # => sig^2 = M2 - p * (mu2 - delta)^2 - (1-p) * mu2^2
            M2map = bn.nansum(cube_norm * mask * (zgrid - zo) ** 2, axis=0) * vstep ** 2
            S2map = M2map - (Vmap - delta) ** 2 * p - Vmap ** 2 * (1 - p)
            Smap = np.sqrt(S2map)
        else:
            Fmap = bn.nansum(cube.data * mask, axis=0)
            cube_norm = np.where(np.isfinite(cube.data / Fmap), cube.data / Fmap, 0)
            Vmap = bn.nansum(cube_norm * mask * (zgrid - zo), axis=0) * vstep  # mu
            S2map = bn.nansum(cube_norm * mask * (zgrid - zo) ** 2, axis=0) * vstep ** 2  # mu^2 + sigma^2
            Smap = np.sqrt(S2map - Vmap ** 2)

        # Remove the instrument LSF, in quadrature
        if remove_LSF_from_disp:
            self.logger.info('removing LSF from dispersion map in quadrature; '
                             'with FWHM %.5f pix' % lsf_fwhm)
            Smap = np.sqrt(Smap ** 2 - (lsf_fwhm / 2.35 * vstep) ** 2)

        self.obs_flux_map = Cube(Fmap)
        self.obs_velocity_map = Cube(Vmap)
        self.obs_disp_map = Cube(Smap)

        return self.obs_flux_map, self.obs_velocity_map, self.obs_disp_map

    ### STATIC METHODS #########################################################

    @staticmethod
    def flux_gaussian(nx, ny, nz, rhwhm):
        """
        Disk Gaussian profile ; the thickness profile is applied separately.
        """
        radius = np.sqrt(nx ** 2 + ny ** 2)
        # sigma = FWHM / 2.35, with FWHM = 2 * rhwhm
        energy = np.exp(-radius ** 2 / 2. / (2. * rhwhm / 2.35) ** 2)
        #energy = np.exp(-radius ** 2 / 2. / (2. * rhwhm / 2.35) ** 2) * np.exp(-nz ** 2 / 2. / hz ** 2)
        return radius, energy

    @staticmethod
    def flux_sersic(nx, ny, nz, size, index):
        """
        Sersic profile ; the thickness profile is applied separately.
        See http://en.wikipedia.org/wiki/Sersic_profile
        """
        beta = 1.9992 * index - 0.3271
        radius = np.sqrt(nx ** 2 + ny ** 2)
        energy = np.exp(-beta * (radius / size) ** (1. / index))
        #energy = np.exp(-beta * (radius / size) ** (1. / index)) * np.exp(-nz ** 2 / 2. / hz ** 2)
        #energy = np.exp(-beta * (radius / size) ** (1. / index)) * np.exp(-np.sqrt(nz ** 2) / hz)  # exponential z-profile
        #energy = np.exp(-beta * (radius / size) ** (1. / index)) * np.cosh(-nz / hz) ** (-2.)  # sech^2 z-profile
        return radius, energy

    @staticmethod
    def flux_exponential(nx, ny, nz, size):
        """
        Disk exponential profile ; the thickness profile is applied separately.
        This is merely a Sérsic profile with index = 1.
        """
        return GalPaK3D.flux_sersic(nx, ny, nz, size, 1.)

    @staticmethod
    def flux_de_vaucouleurs(nx, ny, nz, size):
        """
        De Vaucouleurs profile ; the thickness profile is applied separately.
        See http://en.wikipedia.org/wiki/De_Vaucouleurs%27_law
        This is merely a Sérsic profile with index = 4.
        """
        return GalPaK3D.flux_sersic(nx, ny, nz, size, 4.)
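    # Illustrative sketch (commented out): the Sersic surface-brightness
    # profile implemented in flux_sersic() above, evaluated for n = 1
    # (exponential) and n = 4 (de Vaucouleurs) with an assumed size:
    #
    #   import numpy as np
    #   radius, size = np.array([0., 1., 2.]), 1.0
    #   for n in (1., 4.):
    #       beta = 1.9992 * n - 0.3271
    #       energy = np.exp(-beta * (radius / size) ** (1. / n))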