diff --git a/odl/operator/default_ops.py b/odl/operator/default_ops.py
index 3512ca3b9c5..63520cb5da0 100644
--- a/odl/operator/default_ops.py
+++ b/odl/operator/default_ops.py
@@ -182,7 +182,7 @@ class LinCombOperator(Operator):
         LinCombOperator(a, b)([x, y]) == a * x + b * y
     """

-    def __init__(self, space, a, b):
+    def __init__(self, space, a, b, range_dtype=None):
         """Initialize a new instance.

         Parameters
@@ -204,8 +204,11 @@ def __init__(self, space, a, b):
         >>> z
         rn(3).element([ 2., 4., 6.])
         """
+        if range_dtype is None:
+            range_dtype = space.dtype
         domain = ProductSpace(space, space)
-        super(LinCombOperator, self).__init__(domain, space, linear=True)
+        super(LinCombOperator, self).__init__(domain,
+                                              space.astype(range_dtype),
+                                              linear=True)
         self.a = a
         self.b = b
diff --git a/odl/operator/operator.py b/odl/operator/operator.py
index 33d9b2acb52..9e1a9594d10 100644
--- a/odl/operator/operator.py
+++ b/odl/operator/operator.py
@@ -1076,7 +1076,6 @@ class OperatorSum(Operator):

     The sum is only well-defined for `Operator` instances where
     `Operator.range` is a `LinearSpace`.
-
     """

     def __init__(self, left, right, tmp_ran=None, tmp_dom=None):
@@ -1111,8 +1110,14 @@ def __init__(self, left, right, tmp_ran=None, tmp_dom=None):
         rn(3).element([ 2., 4., 6.])
         """
         if left.range != right.range:
-            raise OpTypeError('operator ranges {!r} and {!r} do not match'
-                              ''.format(left.range, right.range))
+            if (isinstance(left.range, Field) and
+                    isinstance(right.range, Field)):
+                range = left.range + right.range
+            else:
+                raise OpTypeError('operator ranges {!r} and {!r} do not match'
+                                  ''.format(left.range, right.range))
+        else:
+            range = left.range
         if not isinstance(left.range, (LinearSpace, Field)):
             raise OpTypeError('`left.range` {!r} not a `LinearSpace` or '
                               '`Field` instance'.format(left.range))
@@ -1129,7 +1134,7 @@ def __init__(self, left, right, tmp_ran=None, tmp_dom=None):
                               ''.format(tmp_dom, left.domain))

         super(OperatorSum, self).__init__(
-            left.domain, left.range,
+            left.domain, range,
             linear=left.is_linear and right.is_linear)
         self.__left = left
         self.__right = right
diff --git a/odl/set/sets.py b/odl/set/sets.py
index 16de1b8fa9e..1eb8f8265d9 100644
--- a/odl/set/sets.py
+++ b/odl/set/sets.py
@@ -428,10 +428,10 @@ def __hash__(self):

     def element(self, inp=None):
         """Return a real number from ``inp`` or from scratch."""
-        if inp is not None:
-            return float(inp)
-        else:
+        if inp is None:
             return 0.0
+        else:
+            return float(getattr(inp, 'real', inp))

     @property
     def examples(self):
diff --git a/odl/solvers/functional/default_functionals.py b/odl/solvers/functional/default_functionals.py
index 7097892860f..d8ca902f267 100644
--- a/odl/solvers/functional/default_functionals.py
+++ b/odl/solvers/functional/default_functionals.py
@@ -15,6 +15,7 @@
 from odl.solvers.functional.functional import (Functional,
                                                FunctionalQuadraticPerturb)
 from odl.space import ProductSpace
+from odl.set import RealNumbers
 from odl.operator import (Operator, ConstantOperator, ZeroOperator,
                           ScalingOperator, DiagonalOperator, PointwiseNorm)
 from odl.solvers.nonsmooth.proximal_operators import (
@@ -59,18 +60,23 @@ class LpNorm(Functional):

         \| x \|_p = \\left(\\int_\Omega |x(t)|^p dt \\right)^{1/p}
     """

-    def __init__(self, space, exponent):
+    def __init__(self, domain, exponent, range=None):
         """Initialize a new instance.

         Parameters
         ----------
-        space : `DiscreteLp` or `TensorSpace`
+        domain : `DiscreteLp` or `TensorSpace`
             Domain of the functional.
         exponent : float
             Exponent for the norm (``p``).
+        range : `Field`, optional
+            Range of the functional. Default: `RealNumbers`.
         """
+        if range is None:
+            range = RealNumbers()
+
         super(LpNorm, self).__init__(
-            space=space, linear=False, grad_lipschitz=np.nan)
+            domain=domain, range=range, linear=False, grad_lipschitz=np.nan)
         self.exponent = float(exponent)

     # TODO: update when integration operator is in place: issue #440
@@ -135,7 +141,8 @@ class L1Gradient(Operator):
             def __init__(self):
                 """Initialize a new instance."""
                 super(L1Gradient, self).__init__(
-                    functional.domain, functional.domain, linear=False)
+                    functional.domain, range=functional.domain,
+                    linear=False)

             def _call(self, x):
                 """Apply the gradient operator to the given point."""
@@ -155,7 +162,8 @@ class L2Gradient(Operator):
             def __init__(self):
                 """Initialize a new instance."""
                 super(L2Gradient, self).__init__(
-                    functional.domain, functional.domain, linear=False)
+                    functional.domain, range=functional.domain,
+                    linear=False)

             def _call(self, x):
                 """Apply the gradient operator to the given point.
@@ -177,8 +185,7 @@ def _call(self, x):
     def __repr__(self):
         """Return ``repr(self)``."""
         return '{}({!r}, {!r})'.format(self.__class__.__name__,
-                                       self.domain,
-                                       self.exponent)
+                                       self.domain, self.exponent)


 class GroupL1Norm(Functional):
@@ -209,7 +216,7 @@ class GroupL1Norm(Functional):
         \mathrm{d}x.
     """

-    def __init__(self, vfspace, exponent=None):
+    def __init__(self, vfspace, exponent=None, range=None):
         """Initialize a new instance.

         Parameters
@@ -223,6 +230,8 @@ def __init__(self, vfspace, exponent=None):
             0 and 1 are currently not supported due to numerical
             instability. Infinity gives the supremum norm.
             Default: ``vfspace.exponent``, usually 2.
+        range : `Field`, optional
+            Range of the functional. Default: ``vfspace.field``.

         Examples
         --------
@@ -244,7 +253,7 @@ def __init__(self, vfspace, exponent=None):
             raise TypeError('`space.is_power_space` must be `True`')

         super(GroupL1Norm, self).__init__(
-            space=vfspace, linear=False, grad_lipschitz=np.nan)
+            domain=vfspace, range=range, linear=False, grad_lipschitz=np.nan)
         self.pointwise_norm = PointwiseNorm(vfspace, exponent)

     def _call(self, x):
@@ -287,7 +296,7 @@ class GroupL1Gradient(Operator):
             def __init__(self):
                 """Initialize a new instance."""
                 super(GroupL1Gradient, self).__init__(
-                    functional.domain, functional.domain, linear=False)
+                    functional.domain, range=functional.domain, linear=False)

             def _call(self, x, out):
                 """Return ``self(x)``."""
@@ -338,7 +347,7 @@ class IndicatorGroupL1UnitBall(Functional):
     GroupL1Norm
     """

-    def __init__(self, vfspace, exponent=None):
+    def __init__(self, vfspace, exponent=None, range=None):
         """Initialize a new instance.

         Parameters
@@ -352,6 +361,8 @@ def __init__(self, vfspace, exponent=None):
             0 and 1 are currently not supported due to numerical
             instability. Infinity gives the supremum norm.
             Default: ``vfspace.exponent``, usually 2.
+        range : `Field`, optional
+            Range of the functional. Default: ``vfspace.field``.

         Examples
         --------
@@ -373,7 +384,7 @@ def __init__(self, vfspace, exponent=None):
             raise TypeError('`space.is_power_space` must be `True`')

         super(IndicatorGroupL1UnitBall, self).__init__(
-            space=vfspace, linear=False, grad_lipschitz=np.nan)
+            domain=vfspace, range=range, linear=False, grad_lipschitz=np.nan)
         self.pointwise_norm = PointwiseNorm(vfspace, exponent)

     def _call(self, x):
@@ -453,18 +464,22 @@ class IndicatorLpUnitBall(Functional):
     norm.
     """

-    def __init__(self, space, exponent):
+    def __init__(self, domain, exponent, range=None):
         """Initialize a new instance.

         Parameters
         ----------
-        space : `DiscreteLp` or `TensorSpace`
+        domain : `DiscreteLp` or `TensorSpace`
             Domain of the functional.
         exponent : int or infinity
             Specifies which norm to use.
+        range : `Field`, optional
+            Range of the functional. Default: ``domain.field``.
         """
-        super(IndicatorLpUnitBall, self).__init__(space=space, linear=False)
-        self.__norm = LpNorm(space, exponent)
+        super(IndicatorLpUnitBall, self).__init__(domain=domain, range=range,
+                                                  linear=False)
+
+        self.__norm = LpNorm(domain, exponent, range=RealNumbers())
         self.__exponent = float(exponent)

     @property
@@ -501,11 +516,12 @@ def convex_conj(self):
         monotone operator theory in Hilbert spaces*. Springer, 2011.
         """
         if self.exponent == np.inf:
-            return L1Norm(self.domain)
+            return L1Norm(self.domain, range=self.range)
         elif self.exponent == 2:
-            return L2Norm(self.domain)
+            return L2Norm(self.domain, range=self.range)
         else:
-            return LpNorm(self.domain, exponent=conj_exponent(self.exponent))
+            return LpNorm(self.domain, exponent=conj_exponent(self.exponent),
+                          range=self.range)

     @property
     def proximal(self):
@@ -561,20 +577,21 @@ class L1Norm(LpNorm):
     rn(3).element([ 0.5, 0. , 0. ])
     """

-    def __init__(self, space):
+    def __init__(self, domain, range=None):
         """Initialize a new instance.

         Parameters
         ----------
-        space : `DiscreteLp` or `TensorSpace`
+        domain : `DiscreteLp` or `TensorSpace`
             Domain of the functional.
+        range : `Field`, optional
+            Range of the functional. Default: `RealNumbers`.
         """
-        super(L1Norm, self).__init__(space=space, exponent=1)
+        super(L1Norm, self).__init__(domain=domain, range=range, exponent=1)

     def __repr__(self):
         """Return ``repr(self)``."""
-        return '{}({!r})'.format(self.__class__.__name__,
-                                 self.domain)
+        return '{}({!r})'.format(self.__class__.__name__, self.domain)


 class L2Norm(LpNorm):
@@ -599,20 +616,21 @@ class L2Norm(LpNorm):

         \| x \|_2 = \\sqrt{ \\int_\Omega |x(t)|^2 dt }
     """

-    def __init__(self, space):
+    def __init__(self, domain, range=None):
         """Initialize a new instance.

         Parameters
         ----------
-        space : `DiscreteLp` or `TensorSpace`
+        domain : `DiscreteLp` or `TensorSpace`
             Domain of the functional.
+        range : `Field`, optional
+            Range of the functional. Default: `RealNumbers`.
         """
-        super(L2Norm, self).__init__(space=space, exponent=2)
+        super(L2Norm, self).__init__(domain=domain, range=range, exponent=2)

     def __repr__(self):
         """Return ``repr(self)``."""
-        return '{}({!r})'.format(self.__class__.__name__,
-                                 self.domain)
+        return '{}({!r})'.format(self.__class__.__name__, self.domain)


 class L2NormSquared(Functional):
@@ -645,16 +663,18 @@ class L2NormSquared(Functional):
     rn(3).element([ 0.5 , 0.25, 0.2 ])
     """

-    def __init__(self, space):
+    def __init__(self, domain, range=None):
         """Initialize a new instance.

         Parameters
         ----------
-        space : `DiscreteLp` or `TensorSpace`
+        domain : `DiscreteLp` or `TensorSpace`
             Domain of the functional.
+        range : `Field`, optional
+            Range of the functional. Default: `RealNumbers`.
         """
         super(L2NormSquared, self).__init__(
-            space=space, linear=False, grad_lipschitz=2)
+            domain=domain, range=range, linear=False, grad_lipschitz=2)

     # TODO: update when integration operator is in place: issue #440
     def _call(self, x):
@@ -700,18 +720,21 @@ class ConstantFunctional(Functional):

     This functional maps all elements in the domain to a given, constant
     value.
     """

-    def __init__(self, space, constant):
+    def __init__(self, domain, constant, range=None):
         """Initialize a new instance.

         Parameters
         ----------
-        space : `LinearSpace`
+        domain : `LinearSpace`
             Domain of the functional.
         constant : element in ``domain.field``
             The constant value of the functional.
+        range : `Field`, optional
+            Range of the functional. Default: ``domain.field``.
         """
         super(ConstantFunctional, self).__init__(
-            space=space, linear=(constant == 0), grad_lipschitz=0)
+            domain=domain, range=range, linear=(constant == 0),
+            grad_lipschitz=0)
         self.__constant = self.range.element(constant)

     @property
@@ -747,7 +770,7 @@ def convex_conj(self):
             \\infty & \\text{else}
             \\end{array} \\right.
         """
-        return IndicatorZero(self.domain, -self.constant)
+        return IndicatorZero(self.domain, -self.constant, range=self.range)

     def __repr__(self):
         """Return ``repr(self)``."""
@@ -759,15 +782,18 @@ class ZeroFunctional(ConstantFunctional):

     """Functional that maps all elements in the domain to zero."""

-    def __init__(self, space):
+    def __init__(self, domain, range=None):
         """Initialize a new instance.

         Parameters
         ----------
-        space : `LinearSpace`
+        domain : `LinearSpace`
             Domain of the functional.
+        range : `Field`, optional
+            Range of the functional. Default: ``domain.field``.
         """
-        super(ZeroFunctional, self).__init__(space=space, constant=0)
+        super(ZeroFunctional, self).__init__(domain=domain, range=range,
+                                             constant=0)

     def __repr__(self):
         """Return ``repr(self)``."""
@@ -799,8 +825,9 @@ def __init__(self, field, scale):
         >>> func(5)
         15.0
         """
-        Functional.__init__(self, space=field, linear=True, grad_lipschitz=0)
-        ScalingOperator.__init__(self, field, scale)
+        Functional.__init__(self, domain=field, range=field, linear=True,
+                            grad_lipschitz=0)
+        ScalingOperator.__init__(self, domain=field, scalar=scale)

     @property
     def gradient(self):
@@ -844,19 +871,21 @@ class IndicatorBox(Functional):
         \\end{cases}
     """

-    def __init__(self, space, lower=None, upper=None):
+    def __init__(self, domain, lower=None, upper=None, range=None):
         """Initialize an instance.

         Parameters
         ----------
-        space : `LinearSpace`
+        domain : `LinearSpace`
             Domain of the functional.
-        lower : ``space.field`` element or ``space`` `element-like`, optional
+        lower : ``domain.field`` element or ``domain`` `element-like`, optional
             The lower bound.
-            Default: ``None``, interpreted as -infinity
+            Default: ``-infinity``
-        upper : ``space.field`` element or ``space`` `element-like`, optional
+        upper : ``domain.field`` element or ``domain`` `element-like`, optional
             The upper bound.
-            Default: ``None``, interpreted as +infinity
+            Default: ``+infinity``
+        range : `Field`, optional
+            Range of the functional. Default: ``domain.field``.

         Examples
         --------
@@ -867,7 +896,7 @@ def __init__(self, space, lower=None, upper=None):
         >>> func([0, 1, 3])  # one point outside
         inf
         """
-        super(IndicatorBox, self).__init__(space, linear=False)
+        super(IndicatorBox, self).__init__(domain, range=range, linear=False)
         self.lower = lower
         self.upper = upper

@@ -905,13 +934,15 @@ class IndicatorNonnegativity(IndicatorBox):
         \\end{cases}
     """

-    def __init__(self, space):
+    def __init__(self, domain, range=None):
         """Initialize an instance.

         Parameters
         ----------
-        space : `LinearSpace`
+        domain : `LinearSpace`
             Domain of the functional.
+        range : `Field`, optional
+            Range of the functional. Default: ``domain.field``.

         Examples
         --------
@@ -923,7 +954,7 @@ def __init__(self, space):
         inf
         """
         super(IndicatorNonnegativity, self).__init__(
-            space, lower=0, upper=None)
+            domain, range=range, lower=0, upper=None)

     def __repr__(self):
         """Return ``repr(self)``."""
@@ -937,15 +968,17 @@ class IndicatorZero(Functional):

     The function has a constant value if the input is zero, otherwise
     infinity.
""" - def __init__(self, space, constant=0): + def __init__(self, domain, constant=0, range=None): """Initialize a new instance. Parameters ---------- - space : `LinearSpace` + domain : `LinearSpace` Domain of the functional. constant : element in ``domain.field``, optional The constant value of the functional + range : `Field`, optional + Range of the functional. Default: ``domain.field``. Examples -------- @@ -960,7 +993,7 @@ def __init__(self, space, constant=0): >>> func([0, 0, 0]) 2 """ - super(IndicatorZero, self).__init__(space, linear=False) + super(IndicatorZero, self).__init__(domain, range=range, linear=False) self.__constant = constant @property @@ -1058,17 +1091,17 @@ class KullbackLeibler(Functional): .. _Csiszar1991: http://www.jstor.org/stable/2241918 """ - def __init__(self, space, prior=None): + def __init__(self, domain, prior=None): """Initialize a new instance. Parameters ---------- - space : `DiscreteLp` or `TensorSpace` + domain : `DiscreteLp` or `TensorSpace` Domain of the functional. - prior : ``space`` `element-like`, optional + prior : ``domain`` `element-like`, optional Depending on the context, the prior, target or data distribution. It is assumed to be nonnegative. - Default: if None it is take as the one-element. + Default: `one-element`. Examples -------- @@ -1091,7 +1124,7 @@ def __init__(self, space, prior=None): 3.0 """ super(KullbackLeibler, self).__init__( - space=space, linear=False, grad_lipschitz=np.nan) + domain=domain, linear=False, grad_lipschitz=np.nan) if prior is not None and prior not in self.domain: raise ValueError('`prior` not in `domain`' @@ -1145,7 +1178,7 @@ class KLGradient(Operator): def __init__(self): """Initialize a new instance.""" super(KLGradient, self).__init__( - functional.domain, functional.domain, linear=False) + functional.domain, range=functional.domain, linear=False) def _call(self, x): """Apply the gradient operator to the given point. @@ -1207,20 +1240,20 @@ class KullbackLeiblerConvexConj(Functional): KullbackLeibler : convex conjugate functional """ - def __init__(self, space, prior=None): + def __init__(self, domain, prior=None): """Initialize a new instance. Parameters ---------- - space : `DiscreteLp` or `TensorSpace` + domain : `DiscreteLp` or `TensorSpace` Domain of the functional. - prior : ``space`` `element-like`, optional + prior : ``domain`` `element-like`, optional Depending on the context, the prior, target or data distribution. It is assumed to be nonnegative. - Default: if None it is take as the one-element. + Default: `one-element`. """ super(KullbackLeiblerConvexConj, self).__init__( - space=space, linear=False, grad_lipschitz=np.nan) + domain=domain, linear=False, grad_lipschitz=np.nan) if prior is not None and prior not in self.domain: raise ValueError('`prior` not in `domain`' @@ -1271,7 +1304,7 @@ class KLCCGradient(Operator): def __init__(self): """Initialize a new instance.""" super(KLCCGradient, self).__init__( - functional.domain, functional.domain, linear=False) + functional.domain, range=functional.domain, linear=False) def _call(self, x): """Apply the gradient operator to the given point. @@ -1353,20 +1386,20 @@ class KullbackLeiblerCrossEntropy(Functional): .. _Csiszar1991: http://www.jstor.org/stable/2241918 """ - def __init__(self, space, prior=None): + def __init__(self, domain, prior=None): """Initialize a new instance. Parameters ---------- - space : `DiscreteLp` or `TensorSpace` + domain : `DiscreteLp` or `TensorSpace` Domain of the functional. 
-        prior : ``space`` `element-like`, optional
+        prior : ``domain`` `element-like`, optional
             Depending on the context, the prior, target or data
             distribution. It is assumed to be nonnegative.
-            Default: if None it is take as the one-element.
+            Default: ``domain.one()``.
         """
         super(KullbackLeiblerCrossEntropy, self).__init__(
-            space=space, linear=False, grad_lipschitz=np.nan)
+            domain=domain, linear=False, grad_lipschitz=np.nan)

         if prior is not None and prior not in self.domain:
             raise ValueError('`prior` not in `domain`'
@@ -1416,7 +1449,7 @@ class KLCrossEntropyGradient(Operator):
             def __init__(self):
                 """Initialize a new instance."""
                 super(KLCrossEntropyGradient, self).__init__(
-                    functional.domain, functional.domain, linear=False)
+                    functional.domain, range=functional.domain, linear=False)

             def _call(self, x):
                 """Apply the gradient operator to the given point.
@@ -1481,20 +1514,20 @@ class KullbackLeiblerCrossEntropyConvexConj(Functional):
     KullbackLeiblerCrossEntropy : convex conjugate functional
     """

-    def __init__(self, space, prior=None):
+    def __init__(self, domain, prior=None):
         """Initialize a new instance.

         Parameters
         ----------
-        space : `DiscreteLp` or `TensorSpace`
+        domain : `DiscreteLp` or `TensorSpace`
             Domain of the functional.
-        prior : ``space`` `element-like`, optional
+        prior : ``domain`` `element-like`, optional
             Depending on the context, the prior, target or data
             distribution. It is assumed to be nonnegative.
-            Default: if None it is take as the one-element.
+            Default: ``domain.one()``.
         """
         super(KullbackLeiblerCrossEntropyConvexConj, self).__init__(
-            space=space, linear=False, grad_lipschitz=np.nan)
+            domain=domain, linear=False, grad_lipschitz=np.nan)

         if prior is not None and prior not in self.domain:
             raise ValueError('`prior` not in `domain`'
@@ -1529,7 +1562,7 @@ class KLCrossEntCCGradient(Operator):
             def __init__(self):
                 """Initialize a new instance."""
                 super(KLCrossEntCCGradient, self).__init__(
-                    functional.domain, functional.domain, linear=False)
+                    functional.domain, range=functional.domain, linear=False)

             def _call(self, x):
                 """Apply the gradient operator to the given point."""
@@ -1621,7 +1654,7 @@ def __init__(self, *functionals):
         ----------
         functional1, ..., functionalN : `Functional`
             The functionals in the sum.
-            Can also be given as ``space, n`` with ``n`` integer,
+            Can also be given as ``domain, n`` with ``n`` integer,
             in which case the functional is repeated ``n`` times.

         Examples
         --------
@@ -1647,15 +1680,19 @@ def __init__(self, *functionals):
         >>> f_sum = odl.solvers.SeparableSum(l1, 5)
         """
         # Make a power space if the second argument is an integer
-        if (len(functionals) == 2 and
-                isinstance(functionals[1], Integral)):
+        if (len(functionals) == 2 and isinstance(functionals[1], Integral)):
             functionals = [functionals[0]] * functionals[1]

+        range = RealNumbers()
+        for func in functionals:
+            range = (func.range if func.range.contains_set(range) else range)
+
         domains = [func.domain for func in functionals]
         domain = ProductSpace(*domains)
         linear = all(func.is_linear for func in functionals)

-        super(SeparableSum, self).__init__(space=domain, linear=linear)
+        super(SeparableSum, self).__init__(domain=domain, range=range,
+                                           linear=linear)
         self.__functionals = tuple(functionals)

     def _call(self, x):
@@ -1767,9 +1804,14 @@ def __init__(self, operator=None, vector=None, constant=0):
             raise ValueError('need to provide at least one of `operator` and '
                              '`vector`')
         if operator is not None:
+            if operator.range != operator.domain:
+                raise ValueError('domain and range of `operator` must be '
+                                 'identical')
             domain = operator.domain
+            range = operator.domain.field
         elif vector is not None:
             domain = vector.space
+            range = vector.space.field

         if (operator is not None and vector is not None and
                 vector not in operator.domain):
@@ -1777,7 +1819,8 @@ def __init__(self, operator=None, vector=None, constant=0):
                              'to match')

         super(QuadraticForm, self).__init__(
-            space=domain, linear=(operator is None and constant == 0))
+            domain=domain, range=range,
+            linear=(operator is None and constant == 0))

         self.__operator = operator
         self.__vector = vector
@@ -1870,7 +1913,7 @@ def convex_conj(self):
         IndicatorZero
         """
         if self.operator is None:
-            tmp = IndicatorZero(space=self.domain, constant=-self.constant)
+            tmp = IndicatorZero(domain=self.domain, constant=-self.constant)
             if self.vector is None:
                 return tmp
             else:
@@ -1919,18 +1962,19 @@ class NuclearNorm(Functional):
     Models* SIAM Journal of Imaging Sciences 9(1): 116--151, 2016.
     """

-    def __init__(self, space, outer_exp=1, singular_vector_exp=2):
+    def __init__(self, domain, outer_exp=1, singular_vector_exp=2, range=None):
         """Initialize a new instance.

         Parameters
         ----------
-        space : `ProductSpace` of `ProductSpace` of `TensorSpace`
+        domain : `ProductSpace` of `ProductSpace` of `TensorSpace`
             Domain of the functional.
         outer_exp : {1, 2, inf}, optional
             Exponent for the outer norm.
         singular_vector_exp : {1, 2, inf}, optional
             Exponent for the norm for the singular vectors.
-
+        range : `Field`, optional
+            Range of the functional. Default: `RealNumbers`.

         Examples
         --------
         Simple example, nuclear norm of matrix valued function with all ones
@@ -1943,17 +1987,19 @@ def __init__(self, space, outer_exp=1, singular_vector_exp=2):
         >>> norm(space.one())
         6.0
         """
-        if (not isinstance(space, ProductSpace) or
-                not isinstance(space[0], ProductSpace)):
-            raise TypeError('`space` must be a `ProductSpace` of '
-                            '`ProductSpace`s')
-        if (not space.is_power_space or not space[0].is_power_space):
-            raise TypeError('`space` must be of the form `TensorSpace^(nxm)`')
-
+        if (not isinstance(domain, ProductSpace) or
+                not isinstance(domain[0], ProductSpace)):
+            raise TypeError('`domain` must be a `ProductSpace` of '
+                            '`ProductSpace`s')
+        if (not domain.is_power_space or not domain[0].is_power_space):
+            raise TypeError('`domain` must be of the form `TensorSpace^(nxm)`')
+        if range is None:
+            range = RealNumbers()
         super(NuclearNorm, self).__init__(
-            space=space, linear=False, grad_lipschitz=np.nan)
+            domain=domain, range=range, linear=False, grad_lipschitz=np.nan)

-        self.outernorm = LpNorm(self.domain[0, 0], exponent=outer_exp)
+        self.outernorm = LpNorm(self.domain[0, 0], exponent=outer_exp,
+                                range=range)
         self.pwisenorm = PointwiseNorm(self.domain[0],
                                        exponent=singular_vector_exp)
         self.pshape = (len(self.domain), len(self.domain[0]))
@@ -2030,7 +2076,7 @@ class NuclearNormProximal(Operator):

         def __init__(self, sigma):
             self.sigma = float(sigma)
             super(NuclearNormProximal, self).__init__(
-                func.domain, func.domain, linear=False)
+                func.domain, range=func.domain, linear=False)

         def _call(self, x):
             """Return ``self(x)``."""
@@ -2094,8 +2140,7 @@ def convex_conj(self):

     def __repr__(self):
         """Return ``repr(self)``."""
-        return '{}({!r}, {}, {})'.format(self.__class__.__name__,
-                                         self.domain,
+        return '{}({!r}, {}, {})'.format(self.__class__.__name__, self.domain,
                                          self.outernorm.exponent,
                                          self.pwisenorm.exponent)

@@ -2129,18 +2174,19 @@ class IndicatorNuclearNormUnitBall(Functional):
     Models* SIAM Journal of Imaging Sciences 9(1): 116--151, 2016.
     """

-    def __init__(self, space, outer_exp=1, singular_vector_exp=2):
+    def __init__(self, domain, outer_exp=1, singular_vector_exp=2, range=None):
         """Initialize a new instance.

         Parameters
         ----------
-        space : `ProductSpace` of `ProductSpace` of `TensorSpace`
+        domain : `ProductSpace` of `ProductSpace` of `TensorSpace`
             Domain of the functional.
         outer_exp : {1, 2, inf}, optional
             Exponent for the outer norm.
         singular_vector_exp : {1, 2, inf}, optional
             Exponent for the norm for the singular vectors.
-
+        range : `Field`, optional
+            Range of the functional. Default: ``domain.field``.

         Examples
         --------
         Simple example, nuclear norm of matrix valued function with all ones
@@ -2154,9 +2200,11 @@ def __init__(self, space, outer_exp=1, singular_vector_exp=2):
         >>> norm(space.one())
         inf
         """
+        if range is None:
+            range = getattr(domain, 'field', RealNumbers())
         super(IndicatorNuclearNormUnitBall, self).__init__(
-            space=space, linear=False, grad_lipschitz=np.nan)
-        self.__norm = NuclearNorm(space, outer_exp, singular_vector_exp)
+            domain=domain, range=range, linear=False, grad_lipschitz=np.nan)
+        self.__norm = NuclearNorm(domain, outer_exp, singular_vector_exp)

     def _call(self, x):
         """Return ``self(x)``."""
@@ -2187,8 +2235,7 @@ def convex_conj(self):

     def __repr__(self):
         """Return ``repr(self)``."""
-        return '{}({!r}, {}, {})'.format(self.__class__.__name__,
-                                         self.domain,
+        return '{}({!r}, {}, {})'.format(self.__class__.__name__, self.domain,
                                          self.__norm.outernorm.exponent,
                                          self.__norm.pwisenorm.exponent)

@@ -2242,7 +2289,7 @@ class MoreauEnvelope(Functional):
     https://web.stanford.edu/~boyd/papers/pdf/prox_algs.pdf
     """

-    def __init__(self, functional, sigma=1.0):
+    def __init__(self, functional, sigma=1.0, range=None):
         """Initialize an instance.

         Parameters
@@ -2253,6 +2300,8 @@ def __init__(self, functional, sigma=1.0):
         sigma : positive float, optional
             The scalar ``sigma`` in the definition of the Moreau envelope.
             Larger values mean stronger smoothing.
+        range : `Field`, optional
+            Range of the functional. Default: `RealNumbers`.

         Examples
         --------
@@ -2262,10 +2311,13 @@ def __init__(self, functional, sigma=1.0):
         >>> l1_norm = odl.solvers.L1Norm(space)
         >>> smoothed_l1 = MoreauEnvelope(l1_norm)
         """
+        if range is None:
+            range = RealNumbers()
+
         super(MoreauEnvelope, self).__init__(
-            space=functional.domain, linear=False)
+            domain=functional.domain, range=range, linear=False)
         self.__functional = functional
         self.__sigma = sigma

     @property
     def functional(self):
@@ -2295,7 +2352,7 @@ class Huber(Functional):

     .. math::
         F(x) = \\int_\Omega f_{\\gamma}(||x(y)||_2) dy

-    where :mth:`||\cdot||_2` denotes the Euclidean norm for vector-valued
+    where :math:`||\cdot||_2` denotes the Euclidean norm for vector-valued
     functions which reduces to the absolute value for scalar-valued
     functions. The function :math:`f` with smoothing :math:`\\gamma` is given
     by

     .. math::
         f_{\\gamma}(x) =
         \\begin{cases}
             \\frac{1}{2 \\gamma} x^2 & \\text{if } |x| \\leq \\gamma \\\\
             |x| - \\frac{\\gamma}{2} & \\text{else}
         \\end{cases}.
     """

-    def __init__(self, space, gamma):
+    def __init__(self, domain, gamma, range=None):
         """Initialize a new instance.

         Parameters
         ----------
-        space : `TensorSpace`
+        domain : `TensorSpace`
             Domain of the functional.
         gamma : float
             Smoothing parameter of the Huber functional. If ``gamma = 0``,
             the functional is non-smooth and corresponds to the usual L1 norm.
             For ``gamma > 0``, it has a ``1/gamma``-Lipschitz gradient so that
             its convex conjugate is ``gamma``-strongly convex.
+        range : `Field`, optional
+            Range of the functional. Default: `RealNumbers`.

         Examples
         --------
@@ -2371,8 +2430,11 @@ def __init__(self, space, gamma):
         else:
             grad_lipschitz = np.inf

-        super(Huber, self).__init__(
-            space=space, linear=False, grad_lipschitz=grad_lipschitz)
+        if range is None:
+            range = RealNumbers()
+
+        super(Huber, self).__init__(domain=domain, range=range, linear=False,
+                                    grad_lipschitz=grad_lipschitz)

     @property
     def gamma(self):
@@ -2417,7 +2479,7 @@ def proximal(self):
         odl.solvers.proximal_huber : `proximal factory` for the Huber
             norm.
         """
-        return proximal_huber(space=self.domain, gamma=self.gamma)
+        return proximal_huber(self.domain, gamma=self.gamma)

     @property
     def gradient(self):
@@ -2457,7 +2519,6 @@ def gradient(self):
         >>> grad.norm() <= norm_one + tol
         True
         """
-
         functional = self

         class HuberGradient(Operator):
@@ -2467,7 +2528,7 @@ class HuberGradient(Operator):

             def __init__(self):
                 """Initialize a new instance."""
                 super(HuberGradient, self).__init__(
-                    functional.domain, functional.domain, linear=False)
+                    functional.domain, range=functional.domain, linear=False)

             def _call(self, x):
                 """Apply the gradient operator to the given point."""
diff --git a/odl/solvers/functional/functional.py b/odl/solvers/functional/functional.py
index 804d07359dd..2a637a6938f 100644
--- a/odl/solvers/functional/functional.py
+++ b/odl/solvers/functional/functional.py
@@ -48,14 +48,17 @@ class Functional(Operator):
     `_.
     """

-    def __init__(self, space, linear=False, grad_lipschitz=np.nan):
+    def __init__(self, domain, range=None, linear=False,
+                 grad_lipschitz=np.nan):
         """Initialize a new instance.

         Parameters
         ----------
-        space : `LinearSpace`
+        domain : `LinearSpace`
             The domain of this functional, i.e., the set of elements to
             which this functional can be applied.
+        range : `Field`, optional
+            Range of the functional. Default: ``domain.field``.
         linear : bool, optional
             If `True`, the functional is considered as linear.
         grad_lipschitz : float, optional
@@ -64,7 +67,14 @@ def __init__(self, space, linear=False, grad_lipschitz=np.nan):
         # Cannot use `super(Functional, self)` here since that breaks
         # subclasses with multiple inheritance (at least those where both
         # parents implement `__init__`, e.g., in `ScalingFunctional`)
-        Operator.__init__(self, domain=space, range=space.field, linear=linear)
+        if range is None:
+            default_range = getattr(domain, 'field', None)
+            if default_range is None:
+                raise TypeError('could not infer range for functional')
+            else:
+                range = default_range
+
+        Operator.__init__(self, domain=domain, range=range, linear=linear)
         self.__grad_lipschitz = float(grad_lipschitz)

     @property
@@ -468,7 +478,7 @@ def __init__(self, func, scalar):
                             ''.format(func))

         Functional.__init__(
-            self, space=func.domain, linear=func.is_linear,
+            self, domain=func.domain, range=func.range, linear=func.is_linear,
             grad_lipschitz=np.abs(scalar) * func.grad_lipschitz)
         OperatorLeftScalarMult.__init__(self, operator=func, scalar=scalar)

@@ -560,7 +570,7 @@ def __init__(self, func, scalar):
             scalar = func.domain.field.element(scalar)

         Functional.__init__(
-            self, space=func.domain, linear=func.is_linear,
+            self, domain=func.domain, range=func.range, linear=func.is_linear,
             grad_lipschitz=np.abs(scalar) * func.grad_lipschitz)
         OperatorRightScalarMult.__init__(self, operator=func, scalar=scalar)

@@ -620,7 +630,7 @@ def __init__(self, func, op):
                             ''.format(func))

         OperatorComp.__init__(self, left=func, right=op)
-        Functional.__init__(self, space=op.domain,
+        Functional.__init__(self, domain=op.domain, range=func.range,
                             linear=(func.is_linear and op.is_linear),
                             grad_lipschitz=np.nan)

@@ -637,7 +647,7 @@ class FunctionalCompositionGradient(Operator):

             def __init__(self):
                 """Initialize a new instance."""
                 super(FunctionalCompositionGradient, self).__init__(
-                    op.domain, op.domain, linear=False)
+                    domain=op.domain, range=op.domain, linear=False)

             def _call(self, x):
                 """Apply the gradient operator to the given point."""
@@ -682,7 +692,8 @@ def __init__(self, func, vector):
                             ''.format(func))

         OperatorRightVectorMult.__init__(self, operator=func, vector=vector)
-        Functional.__init__(self, space=func.domain)
+        Functional.__init__(self, domain=func.domain, range=func.range,
+                            linear=func.is_linear)

     @property
     def functional(self):
@@ -724,9 +735,17 @@ def __init__(self, left, right):
         if not isinstance(right, Functional):
             raise TypeError('`right` {!r} is not a `Functional` instance'
                             ''.format(right))
+        if left.range.contains_set(right.range):
+            range = left.range
+        elif right.range.contains_set(left.range):
+            range = right.range
+        else:
+            raise ValueError('The ranges of the `Functionals` in the sum '
+                             '({!r} and {!r}) do not contain one another'
+                             ''.format(left.range, right.range))

         Functional.__init__(
-            self, space=left.domain,
+            self, domain=left.domain, range=range,
             linear=(left.is_linear and right.is_linear),
             grad_lipschitz=left.grad_lipschitz + right.grad_lipschitz)
         OperatorSum.__init__(self, left, right)

@@ -767,7 +786,7 @@ def __init__(self, func, scalar):

         super(FunctionalScalarSum, self).__init__(
             left=func,
-            right=ConstantFunctional(space=func.domain, constant=scalar))
+            right=ConstantFunctional(domain=func.domain, constant=scalar))

     @property
     def scalar(self):
@@ -813,7 +832,7 @@ def __init__(self, func, translation):
             translation = func.domain.element(translation)

         super(FunctionalTranslation, self).__init__(
-            space=func.domain, linear=False,
+            domain=func.domain, range=func.range, linear=False,
             grad_lipschitz=func.grad_lipschitz)

     # TODO: Add case if we have translation -> scaling -> translation?
@@ -925,8 +944,13 @@ def __init__(self, left, right):
             raise TypeError('`func` {} is not a `Functional` instance'
                             ''.format(right))

+        if left.range != right.range:
+            raise ValueError('ranges of `left` and `right` do not match '
+                             '({!r} and {!r})'.format(left.range, right.range))
+
         super(InfimalConvolution, self).__init__(
-            space=left.domain, linear=False, grad_lipschitz=np.nan)
+            domain=left.domain, range=left.range, linear=False,
+            grad_lipschitz=np.nan)
         self.__left = left
         self.__right = right

@@ -1018,7 +1042,7 @@ def __init__(self, func, quadratic_coeff=0, linear_term=None,
         self.__constant = constant.real

         super(FunctionalQuadraticPerturb, self).__init__(
-            space=func.domain,
+            domain=func.domain, range=func.range,
             linear=func.is_linear and (quadratic_coeff == 0),
             grad_lipschitz=grad_lipschitz)

@@ -1044,8 +1068,7 @@ def constant(self):

     def _call(self, x):
         """Apply the functional to the given point."""
-        return (self.functional(x) +
-                self.quadratic_coeff * x.inner(x) +
+        return (self.functional(x) + self.quadratic_coeff * x.inner(x) +
                 x.inner(self.linear_term) + self.constant)

     @property
@@ -1147,9 +1170,17 @@ def __init__(self, left, right):
         if not isinstance(right, Functional):
             raise TypeError('`right` {} is not a `Functional` instance'
                             ''.format(right))
+        if left.range.contains_set(right.range):
+            range = left.range
+        elif right.range.contains_set(left.range):
+            range = right.range
+        else:
+            raise ValueError('The ranges of the `Functionals` in the product '
+                             '({!r} and {!r}) do not contain one another'
+                             ''.format(left.range, right.range))

         OperatorPointwiseProduct.__init__(self, left, right)
-        Functional.__init__(self, left.domain, linear=False,
+        Functional.__init__(self, left.domain, range=range, linear=False,
                             grad_lipschitz=np.nan)

     @property
@@ -1207,14 +1238,20 @@ def __init__(self, dividend, divisor):
             raise TypeError('`divisor` {} is not a `Functional` instance'
                             ''.format(divisor))

         if dividend.domain != divisor.domain:
             raise ValueError('domains of the operators do not match')

+        if dividend.range.contains_set(divisor.range):
+            range = dividend.range
+        elif divisor.range.contains_set(dividend.range):
+            range = divisor.range
+        else:
+            raise ValueError('The ranges of the `Functionals` in the quotient '
+                             '({!r} and {!r}) do not contain one another'
+                             ''.format(dividend.range, divisor.range))
+
         self.__dividend = dividend
         self.__divisor = divisor

         super(FunctionalQuotient, self).__init__(
-            dividend.domain, linear=False, grad_lipschitz=np.nan)
+            dividend.domain, range=range, linear=False, grad_lipschitz=np.nan)

     @property
     def dividend(self):
@@ -1301,7 +1338,7 @@ def __init__(self, func):
                             ''.format(func))

         super(FunctionalDefaultConvexConjugate, self).__init__(
-            space=func.domain, linear=func.is_linear)
+            domain=func.domain, range=func.range, linear=func.is_linear)
         self.__convex_conj = func

     @property
@@ -1412,7 +1449,7 @@ def __init__(self, functional, point, subgrad):
         grad_lipschitz = functional.grad_lipschitz + subgrad.norm()

         super(BregmanDistance, self).__init__(
-            space=functional.domain, linear=False,
+            domain=functional.domain, range=functional.range, linear=False,
             grad_lipschitz=grad_lipschitz)

     @property
@@ -1468,7 +1505,7 @@ def __repr__(self):

 def simple_functional(space, fcall=None, grad=None, prox=None,
                       grad_lip=np.nan, convex_conj_fcall=None,
                       convex_conj_grad=None, convex_conj_prox=None,
                       convex_conj_grad_lip=np.nan,
-                      linear=False):
+                      linear=False, range=None):
     """Simplified interface to create a functional with specific properties.

    Users may specify as many properties as are needed by the application.
@@ -1550,7 +1587,7 @@ class SimpleFunctional(Functional):

         def __init__(self):
             """Initialize an instance."""
             super(SimpleFunctional, self).__init__(
-                space, linear=linear, grad_lipschitz=grad_lip)
+                space, range=range, linear=linear, grad_lipschitz=grad_lip)

         def _call(self, x):
             """Return ``self(x)``."""
@@ -1585,7 +1622,8 @@ def convex_conj(self):
                 convex_conj_grad=grad,
                 convex_conj_prox=prox,
                 convex_conj_grad_lip=grad_lip,
-                linear=linear)
+                linear=linear,
+                range=range)

     return SimpleFunctional()
diff --git a/odl/solvers/nonsmooth/admm.py b/odl/solvers/nonsmooth/admm.py
index 6d1c5343851..c087ef8c83b 100644
--- a/odl/solvers/nonsmooth/admm.py
+++ b/odl/solvers/nonsmooth/admm.py
@@ -145,6 +145,9 @@ def admm_linearized(x, f, g, L, tau, sigma, niter, **kwargs):
         if callback is not None:
             callback(x)

+    if callback is not None:
+        callback.final()
+

 def admm_linearized_simple(x, f, g, L, tau, sigma, niter, **kwargs):
     """Non-optimized version of ``admm_linearized``.
@@ -161,3 +164,5 @@ def admm_linearized_simple(x, f, g, L, tau, sigma, niter, **kwargs):
         u = L(x) + u - z
         if callback is not None:
             callback(x)
+    if callback is not None:
+        callback.final()
diff --git a/odl/solvers/nonsmooth/alternating_dual_updates.py b/odl/solvers/nonsmooth/alternating_dual_updates.py
index 9056c47616d..4cea4f6857a 100644
--- a/odl/solvers/nonsmooth/alternating_dual_updates.py
+++ b/odl/solvers/nonsmooth/alternating_dual_updates.py
@@ -190,6 +190,8 @@ def adupdates(x, g, L, stepsize, inner_stepsizes, niter, random=False,
                 callback(x)
         if callback is not None and callback_loop == 'outer':
             callback(x)
+    if callback is not None:
+        callback.final()


 def adupdates_simple(x, g, L, stepsize, inner_stepsizes, niter,
diff --git a/odl/solvers/nonsmooth/douglas_rachford.py b/odl/solvers/nonsmooth/douglas_rachford.py
index 9641895c7fe..aa66628978f 100644
--- a/odl/solvers/nonsmooth/douglas_rachford.py
+++ b/odl/solvers/nonsmooth/douglas_rachford.py
@@ -228,6 +228,9 @@ def douglas_rachford_pd(x, f, g, L, niter, tau=None, sigma=None,
         if callback is not None:
             callback(p1)

+    if callback is not None:
+        callback.final()
+
    # The final result is actually in p1 according to the algorithm, so we need
    # to assign here.
    x.assign(p1)
diff --git a/odl/solvers/nonsmooth/forward_backward.py b/odl/solvers/nonsmooth/forward_backward.py
index 9815b4c59e7..1e39adf8d00 100644
--- a/odl/solvers/nonsmooth/forward_backward.py
+++ b/odl/solvers/nonsmooth/forward_backward.py
@@ -107,10 +107,12 @@ def forward_backward_pd(x, f, g, L, h, tau, sigma, niter,

     .. math::
        2 \min \{ \\frac{1}{\\tau}, \\frac{1}{\sigma_1}, \\ldots,
        \\frac{1}{\sigma_m} \} \cdot \min\{ \\eta, \\nu_1, \\ldots, \\nu_m \}
-       \cdot \\sqrt{1 - \\tau \\sum_{i=1}^n \\sigma_i ||L_i||^2} > 1,
+       \cdot \\left(1 - \\sqrt{\\tau \\sum_{i=1}^n \\sigma_i ||L_i||^2}\\right)
+       > 1,

     where, if the simpler problem is considered, all :math:`\\nu_i` can be
-    considered to be :math:`\\infty`.
+    considered to be :math:`\\infty` and :math:`\\eta` is one over the
+    Lipschitz constant of :math:`\\nabla h`.

     For reference on the forward-backward primal-dual algorithm, see [BC2015].
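All solvers touched by this patch now invoke ``callback.final()`` exactly once after their iteration loop. A minimal sketch of a user-defined callback that uses the new hook (``odl.solvers.Callback``, ``odl.solvers.L2NormSquared`` and ``odl.solvers.steepest_descent`` are existing ODL names; the counting logic itself is purely illustrative):

import odl


class CallbackCountIterations(odl.solvers.Callback):
    """Count iterations and report once when the solver finishes."""

    def __init__(self):
        self.iter = 0

    def __call__(self, x):
        # Invoked by the solver once per iteration with the current iterate.
        self.iter += 1

    def final(self):
        # With this patch, invoked exactly once after the iteration loop.
        print('finished after {} iterations'.format(self.iter))


space = odl.rn(3)
func = odl.solvers.L2NormSquared(space)
x = space.one()
odl.solvers.steepest_descent(func, x, maxiter=5,
                             callback=CallbackCountIterations())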
@@ -188,3 +190,5 @@

     if callback is not None:
         callback(x)
+    if callback is not None:
+        callback.final()
diff --git a/odl/solvers/nonsmooth/primal_dual_hybrid_gradient.py b/odl/solvers/nonsmooth/primal_dual_hybrid_gradient.py
index 2ac4006c3bf..1a2392fa2a6 100644
--- a/odl/solvers/nonsmooth/primal_dual_hybrid_gradient.py
+++ b/odl/solvers/nonsmooth/primal_dual_hybrid_gradient.py
@@ -308,6 +308,9 @@ def pdhg(x, f, g, L, niter, tau, sigma, **kwargs):
         if callback is not None:
             callback(x)

+    if callback is not None:
+        callback.final()
+

 def pdhg_stepsize(L, tau=None, sigma=None):
     r"""Default step sizes for `pdhg`.
diff --git a/odl/solvers/nonsmooth/proximal_gradient_solvers.py b/odl/solvers/nonsmooth/proximal_gradient_solvers.py
index 41642077484..0e7ecea9305 100644
--- a/odl/solvers/nonsmooth/proximal_gradient_solvers.py
+++ b/odl/solvers/nonsmooth/proximal_gradient_solvers.py
@@ -115,6 +115,8 @@ def proximal_gradient(x, f, g, gamma, niter, callback=None, **kwargs):

         if callback is not None:
             callback(x)
+    if callback is not None:
+        callback.final()


 def accelerated_proximal_gradient(x, f, g, gamma, niter, callback=None,
@@ -211,6 +213,8 @@ def accelerated_proximal_gradient(x, f, g, gamma, niter, callback=None,

         if callback is not None:
             callback(x)
+    if callback is not None:
+        callback.final()


 if __name__ == '__main__':
diff --git a/odl/solvers/smooth/gradient.py b/odl/solvers/smooth/gradient.py
index 498b4693b9c..05fd1991ad3 100644
--- a/odl/solvers/smooth/gradient.py
+++ b/odl/solvers/smooth/gradient.py
@@ -103,6 +103,8 @@ def steepest_descent(f, x, line_search=1.0, maxiter=1000, tol=1e-16,

         if callback is not None:
             callback(x)
+    if callback is not None:
+        callback.final()


 def adam(f, x, learning_rate=1e-3, beta1=0.9, beta2=0.999, eps=1e-8,
@@ -178,6 +180,8 @@ def adam(f, x, learning_rate=1e-3, beta1=0.9, beta2=0.999, eps=1e-8,

         if callback is not None:
             callback(x)
+    if callback is not None:
+        callback.final()


 if __name__ == '__main__':
diff --git a/odl/solvers/smooth/newton.py b/odl/solvers/smooth/newton.py
index f54c64f4c84..b3aa38c6fa4 100644
--- a/odl/solvers/smooth/newton.py
+++ b/odl/solvers/smooth/newton.py
@@ -355,6 +355,8 @@ def bfgs_method(f, x, line_search=1.0, maxiter=1000, tol=1e-15, num_store=None,

         if callback is not None:
             callback(x)
+    if callback is not None:
+        callback.final()


 def broydens_method(f, x, line_search=1.0, impl='first', maxiter=1000,
@@ -484,6 +486,8 @@ def broydens_method(f, x, line_search=1.0, impl='first', maxiter=1000,

         if callback is not None:
             callback(x)
+    if callback is not None:
+        callback.final()


 if __name__ == '__main__':
diff --git a/odl/solvers/smooth/nonlinear_cg.py b/odl/solvers/smooth/nonlinear_cg.py
index 74d85189889..560f5b28dd9 100644
--- a/odl/solvers/smooth/nonlinear_cg.py
+++ b/odl/solvers/smooth/nonlinear_cg.py
@@ -130,3 +130,6 @@ def conjugate_gradient_nonlinear(f, x, line_search=1.0, maxiter=1000, nreset=0,

         if callback is not None:
             callback(x)
+
+    if callback is not None:
+        callback.final()
diff --git a/odl/solvers/util/callback.py b/odl/solvers/util/callback.py
index 31f333b296d..98f27df1b6f 100644
--- a/odl/solvers/util/callback.py
+++ b/odl/solvers/util/callback.py
@@ -102,6 +102,13 @@ def reset(self):
         """
         pass

+    def final(self):
+        """Called once the algorithm has finished its computation.
+
+        Used e.g. by `CallbackShowConvergence` to save the plot after all
+        iterations are finished.
+        """
+        pass
+
     def __repr__(self):
         """Return ``repr(self)``."""
         return '{}()'.format(self.__class__.__name__)
@@ -134,6 +141,10 @@ def reset(self):
         for callback in self.callbacks:
             callback.reset()

+    def final(self):
+        """Call ``final`` on all callbacks."""
+        for callback in self.callbacks:
+            callback.final()
+
     def __repr__(self):
         """Return ``repr(self)``."""
         return ' & '.join('{!r}'.format(p) for p in self.callbacks)
@@ -822,7 +833,7 @@ class CallbackShowConvergence(Callback):

     """Displays a convergence plot."""

     def __init__(self, functional, title='convergence', logx=False, logy=False,
-                 **kwargs):
+                 saveto=None, **kwargs):
         """Initialize a new instance.

         Parameters
         ----------
@@ -836,6 +847,8 @@ def __init__(self, functional, title='convergence', logx=False, logy=False,
             If true, the x axis is logarithmic.
         logy : bool, optional
             If true, the y axis is logarithmic.
+        saveto : str, optional
+            Filename for saving the plot. If ``None``, nothing is saved.

         Other Parameters
         ----------------
@@ -846,6 +859,9 @@ def __init__(self, functional, title='convergence', logx=False, logy=False,
         self.title = title
         self.logx = logx
         self.logy = logy
+        self.saveto = saveto
+        self.dpi = kwargs.pop('dpi', 300)
+        self.bbox_inches = kwargs.pop('bbox_inches', 'tight')
         self.kwargs = kwargs
         self.iter = 0

@@ -873,6 +889,11 @@ def reset(self):
         """Set `iter` to 0."""
         self.iter = 0

+    def final(self):
+        """Save the figure to ``saveto`` if given."""
+        if self.saveto:
+            self.fig.savefig(self.saveto, dpi=self.dpi,
+                             bbox_inches=self.bbox_inches)
+
     def __repr__(self):
         """Return ``repr(self)``."""
         return '{}(functional={}, title={}, logx={}, logy={})'.format(
diff --git a/odl/space/pspace.py b/odl/space/pspace.py
index 3296fb40c2f..ef46e4fafca 100644
--- a/odl/space/pspace.py
+++ b/odl/space/pspace.py
@@ -1395,7 +1395,7 @@ def show(self, title=None, indices=None, **kwargs):
             ``(indices, Ellipsis)``. In particular, for ``None``, all
             parts are shown with default slicing.
-        in_figs : sequence of `matplotlib.figure.Figure`, optional
+        fig : sequence of `matplotlib.figure.Figure`, optional
             Update these figures instead of creating new ones. Typically
             the return value of an earlier call to ``show`` is used for
             this parameter.
diff --git a/odl/ufunc_ops/ufunc_ops.py b/odl/ufunc_ops/ufunc_ops.py
index af88b1f408a..e5d0e5401d7 100644
--- a/odl/ufunc_ops/ufunc_ops.py
+++ b/odl/ufunc_ops/ufunc_ops.py
@@ -326,7 +326,7 @@ def __init__(self, field):
                             "".format(name))
         linear = name in LINEAR_UFUNCS
-        Functional.__init__(self, space=field, linear=linear)
+        Functional.__init__(self, domain=field, linear=linear)

     def _call(self, x):
         """Return ``self(x)``."""
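A usage sketch for the new ``saveto`` option of `CallbackShowConvergence`, which relies on the ``final`` hook added above (assumes matplotlib is installed; the functional and the file name are illustrative):

import odl

space = odl.rn(3)
func = odl.solvers.L2NormSquared(space)
callback = odl.solvers.CallbackShowConvergence(func, logy=True,
                                               saveto='convergence.png')
x = space.element([1.0, 2.0, 3.0])
odl.solvers.steepest_descent(func, x, maxiter=10, callback=callback)
# The solver now calls callback.final() after the loop, which writes the
# figure to 'convergence.png'. Per the patch, 'dpi' and 'bbox_inches' are
# popped from **kwargs in __init__ and forwarded to savefig.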