fetch from origin #124

Merged: 12 commits, Jul 8, 2020
309 changes: 125 additions & 184 deletions .pylintdict

Large diffs are not rendered by default.

19 changes: 14 additions & 5 deletions qiskit/aqua/components/optimizers/gsls.py
@@ -14,6 +14,7 @@

"""Line search with Gaussian-smoothed samples on a sphere."""

+import warnings
from typing import Dict, Optional, Tuple, List, Callable
import logging
import numpy as np
@@ -32,14 +33,14 @@ class GSLS(Optimizer):
based on Gaussian-smoothed samples on a sphere.
"""

-_OPTIONS = ['max_iter', 'max_eval', 'disp', 'sampling_radius',
+_OPTIONS = ['maxiter', 'max_eval', 'disp', 'sampling_radius',
'sample_size_factor', 'initial_step_size', 'min_step_size',
'step_size_multiplier', 'armijo_parameter',
'min_gradient_norm', 'max_failed_rejection_sampling']

# pylint:disable=unused-argument
def __init__(self,
-max_iter: int = 10000,
+maxiter: int = 10000,
max_eval: int = 10000,
disp: bool = False,
sampling_radius: float = 1.0e-6,
@@ -49,10 +50,11 @@ def __init__(self,
step_size_multiplier: float = 0.4,
armijo_parameter: float = 1.0e-1,
min_gradient_norm: float = 1e-8,
-max_failed_rejection_sampling: int = 50) -> None:
+max_failed_rejection_sampling: int = 50,
+max_iter: Optional[int] = None) -> None:
"""
Args:
-max_iter: Maximum number of iterations.
+maxiter: Maximum number of iterations.
max_eval: Maximum number of evaluations.
disp: Set to True to display convergence messages.
sampling_radius: Sampling radius to determine gradient estimate.
@@ -67,8 +69,15 @@ def __init__(self,
min_gradient_norm: If the gradient norm is below this threshold, the algorithm stops.
max_failed_rejection_sampling: Maximum number of attempts to sample points within
bounds.
+max_iter: Deprecated, use maxiter.
"""
super().__init__()
+if max_iter is not None:
+    warnings.warn('The max_iter parameter is deprecated as of '
+                  '0.8.0 and will be removed no sooner than 3 months after the release. '
+                  'You should use maxiter instead.',
+                  DeprecationWarning)
+    maxiter = max_iter
for k, v in locals().items():
if k in self._OPTIONS:
self._options[k] = v
@@ -153,7 +162,7 @@ def ls_optimize(self, n: int, obj_fun: Callable, initial_point: np.ndarray, var_
x = initial_point
x_value = obj_fun(x)
n_evals += 1
-while iter_count < self._options['max_iter'] \
+while iter_count < self._options['maxiter'] \
and n_evals < self._options['max_eval']:

# Determine set of sample points
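For reference, the caller-side effect of the shim above: a minimal sketch, assuming the qiskit.aqua build from this branch.

```python
import warnings

from qiskit.aqua.components.optimizers import GSLS

optimizer = GSLS(maxiter=5000)        # preferred spelling after this change

# The old keyword still works during the deprecation window: it warns and
# is forwarded to maxiter before the options dict is populated.
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter('always')
    optimizer = GSLS(max_iter=5000)

assert any(issubclass(w.category, DeprecationWarning) for w in caught)
```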
59 changes: 37 additions & 22 deletions qiskit/aqua/components/optimizers/spsa.py
@@ -14,6 +14,8 @@

"""Simultaneous Perturbation Stochastic Approximation optimizer."""

+import warnings
+from typing import Optional, List, Callable
import logging

import numpy as np
@@ -62,18 +64,19 @@ class SPSA(Optimizer):

# pylint: disable=unused-argument
def __init__(self,
-max_trials: int = 1000,
+maxiter: int = 1000,
save_steps: int = 1,
last_avg: int = 1,
c0: float = _C0,
c1: float = 0.1,
c2: float = 0.602,
c3: float = 0.101,
c4: float = 0,
-skip_calibration: float = False) -> None:
+skip_calibration: float = False,
+max_trials: Optional[int] = None) -> None:
"""
Args:
-max_trials: Maximum number of iterations to perform.
+maxiter: Maximum number of iterations to perform.
save_steps: Save intermediate info every save_steps step. It has a min. value of 1.
last_avg: Averaged parameters over the last_avg iterations.
If last_avg = 1, only the last iteration is considered. It has a min. value of 1.
@@ -83,14 +86,21 @@ def __init__(self,
c3: The gamma in the paper, and it is used to adjust c (c1) at each iteration.
c4: The parameter used to control a as well.
skip_calibration: Skip calibration and use provided c(s) as is.
+max_trials: Deprecated, use maxiter.
"""
validate_min('save_steps', save_steps, 1)
validate_min('last_avg', last_avg, 1)
super().__init__()
+if max_trials is not None:
+    warnings.warn('The max_trials parameter is deprecated as of '
+                  '0.8.0 and will be removed no sooner than 3 months after the release. '
+                  'You should use maxiter instead.',
+                  DeprecationWarning)
+    maxiter = max_trials
for k, v in locals().items():
if k in self._OPTIONS:
self._options[k] = v
-self._max_trials = max_trials
+self._maxiter = maxiter
self._parameters = np.array([c0, c1, c2, c3, c4])
self._skip_calibration = skip_calibration

@@ -113,33 +123,37 @@ def optimize(self, num_vars, objective_function, gradient_function=None,
logger.debug('Parameters: %s', self._parameters)
if not self._skip_calibration:
# at least one calibration, at most 25 calibrations
-num_steps_calibration = min(25, max(1, self._max_trials // 5))
+num_steps_calibration = min(25, max(1, self._maxiter // 5))
self._calibration(objective_function, initial_point, num_steps_calibration)
else:
logger.debug('Skipping calibration, parameters used as provided.')

opt, sol, _, _, _, _ = self._optimization(objective_function,
initial_point,
-max_trials=self._max_trials,
+maxiter=self._maxiter,
**self._options)
return sol, opt, None

-def _optimization(self, obj_fun, initial_theta, max_trials, save_steps=1, last_avg=1):
+def _optimization(self,
+                  obj_fun: Callable,
+                  initial_theta: np.ndarray,
+                  maxiter: int,
+                  save_steps: int = 1,
+                  last_avg: int = 1) -> List:
"""Minimizes obj_fun(theta) with a simultaneous perturbation stochastic
approximation algorithm.

Args:
-obj_fun (callable): the function to minimize
-initial_theta (numpy.array): initial value for the variables of
-obj_fun
-max_trials (int) : the maximum number of trial steps ( = function
+obj_fun: the function to minimize
+initial_theta: initial value for the variables of obj_fun
+maxiter: the maximum number of trial steps ( = function
calls/2) in the optimization
-save_steps (int) : stores optimization outcomes each 'save_steps'
+save_steps: stores optimization outcomes each 'save_steps'
trial steps
-last_avg (int) : number of last updates of the variables to average
+last_avg: number of last updates of the variables to average
on for the final obj_fun
Returns:
-list: a list with the following elements:
+a list with the following elements:
cost_final : final optimized value for obj_fun
theta_best : final values of the variables corresponding to
cost_final
@@ -159,7 +173,7 @@ def _optimization(self, obj_fun, initial_theta, max_trials, save_steps=1, last_a
cost_minus_save = []
theta = initial_theta
theta_best = np.zeros(initial_theta.shape)
-for k in range(max_trials):
+for k in range(maxiter):
# SPSA Parameters
a_spsa = float(self._parameters[0]) / np.power(k + 1 + self._parameters[4],
self._parameters[2])
@@ -187,7 +201,7 @@ def _optimization(self, obj_fun, initial_theta, max_trials, save_steps=1, last_a
cost_plus_save.append(cost_plus)
cost_minus_save.append(cost_minus)

-if k >= max_trials - last_avg:
+if k >= maxiter - last_avg:
theta_best += theta / last_avg
# final cost update
cost_final = obj_fun(theta_best)
@@ -196,7 +210,10 @@ def _optimization(self, obj_fun, initial_theta, max_trials, save_steps=1, last_a
return [cost_final, theta_best, cost_plus_save, cost_minus_save,
theta_plus_save, theta_minus_save]

-def _calibration(self, obj_fun, initial_theta, stat):
+def _calibration(self,
+                 obj_fun: Callable,
+                 initial_theta: np.ndarray,
+                 stat: int):
"""Calibrates and stores the SPSA parameters back.

SPSA parameters are c0 through c5 stored in parameters array
@@ -207,11 +224,9 @@ def _calibration(self, obj_fun, initial_theta, stat):
c1 is initial_c and is first perturbation of initial_theta.

Args:
-obj_fun (callable): the function to minimize.
-initial_theta (numpy.array): initial value for the variables of
-obj_fun.
-stat (int) : number of random gradient directions to average on in
-the calibration.
+obj_fun: the function to minimize.
+initial_theta: initial value for the variables of obj_fun.
+stat: number of random gradient directions to average on in the calibration.
"""

target_update = self._parameters[0]
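The same shim pattern as in GSLS, with one detail worth noting: maxiter now also drives the calibration phase. A short sketch with an arbitrary iteration count:

```python
from qiskit.aqua.components.optimizers import SPSA

spsa = SPSA(maxiter=80)        # preferred
legacy = SPSA(max_trials=80)   # accepted for now: warns, then maps to maxiter

# Unless skip_calibration is set, optimize() first runs
# min(25, max(1, maxiter // 5)) = 16 calibration steps before the main loop.
```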
3 changes: 2 additions & 1 deletion qiskit/aqua/operators/converters/circuit_sampler.py
@@ -47,6 +47,7 @@ class CircuitSampler(ConverterBase):
the same circuit efficiently. If you are converting multiple different Operators,
you are better off using a different CircuitSampler for each Operator to avoid cache thrashing.
"""

def __init__(self,
backend: Union[BaseBackend, QuantumInstance] = None,
statevector: Optional[bool] = None,
@@ -302,7 +303,7 @@ def sample_circuits(self,
result_sfn = StateFn(op_c.coeff * results.get_statevector(circ_index))
else:
shots = self.quantum_instance._run_config.shots
-result_sfn = StateFn({b: (v * op_c.coeff / shots) ** .5
+result_sfn = StateFn({b: (v / shots) ** 0.5 * op_c.coeff
for (b, v) in results.get_counts(circ_index).items()})
if self._attach_results:
result_sfn.execution_results = circ_results
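The coefficient fix above is easiest to check with plain numbers: op_c.coeff must scale the amplitude (v / shots) ** 0.5 linearly rather than being pulled under the square root. A self-contained sketch with made-up counts:

```python
shots, v, coeff = 1024, 512, 2.0           # hypothetical counts and coefficient

old_amplitude = (v * coeff / shots) ** .5  # coeff ends up under the square root
new_amplitude = (v / shots) ** 0.5 * coeff # coeff scales the amplitude linearly

print(old_amplitude)   # 1.0    (= sqrt(2 * 0.5)), off by a factor sqrt(coeff)
print(new_amplitude)   # 1.4142 (= 2 * sqrt(0.5))
```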
4 changes: 2 additions & 2 deletions qiskit/aqua/operators/list_ops/composed_op.py
@@ -123,9 +123,9 @@ def reduce(self) -> OperatorBase:
def distribute_compose(l, r):
if isinstance(l, ListOp) and l.distributive:
# Either ListOp or SummedOp, returns correct type
-return l.__class__([distribute_compose(l_op, r) for l_op in l.oplist])
+return l.__class__([distribute_compose(l_op * l.coeff, r) for l_op in l.oplist])
if isinstance(r, ListOp) and r.distributive:
-return r.__class__([distribute_compose(l, r_op) for r_op in r.oplist])
+return r.__class__([distribute_compose(l, r_op * r.coeff) for r_op in r.oplist])
else:
return l.compose(r)

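With this fix, the coefficient sitting on a distributive ListOp is multiplied into each summand while composing instead of being dropped. A hedged sketch of the intended behavior (operator names as exported by qiskit.aqua.operators):

```python
from qiskit.aqua.operators import X, Y, Z

composed = (2 * (X + Z)) @ Y   # compose a weighted sum with another operator
reduced = composed.reduce()    # distributes: should equal 2*(X @ Y) + 2*(Z @ Y)

# Before this change the outer factor 2 was silently lost during distribution.
```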
72 changes: 63 additions & 9 deletions qiskit/aqua/operators/list_ops/summed_op.py
@@ -14,7 +14,6 @@

""" SummedOp Class """

-from functools import reduce
from typing import List, Union, cast

import numpy as np
@@ -72,9 +71,6 @@ def add(self, other: OperatorBase) -> OperatorBase:
Returns:
A ``SummedOp`` equivalent to the sum of self and other.
"""
-if self == other:
-    return self.mul(2.0)

self_new_ops = self.oplist if self.coeff == 1 \
else [op.mul(self.coeff) for op in self.oplist]
if isinstance(other, SummedOp):
@@ -84,10 +80,10 @@
other_new_ops = [other]
return SummedOp(self_new_ops + other_new_ops)

-def simplify(self) -> 'SummedOp':
+def collapse_summands(self) -> 'SummedOp':
"""Return Operator by simplifying duplicate operators.
-E.g., ``SummedOp([2 * X ^ Y, X ^ Y]).simplify() -> SummedOp([3 * X ^ Y])``.
+E.g., ``SummedOp([2 * X ^ Y, X ^ Y]).collapse_summands() -> SummedOp([3 * X ^ Y])``.
Returns:
A simplified ``SummedOp`` equivalent to self.
@@ -113,11 +109,23 @@ def simplify(self) -> 'SummedOp':
coeffs.append(self.coeff)
return SummedOp([op * coeff for op, coeff in zip(oplist, coeffs)]) # type: ignore

-# Try collapsing list or trees of Sums.
-# TODO be smarter about the fact that any two ops in oplist could be evaluated for sum.
def reduce(self) -> OperatorBase:
-reduced_ops = [op.reduce() for op in self.oplist]
-reduced_ops = reduce(lambda x, y: x.add(y), reduced_ops) * self.coeff
"""Try collapsing list or trees of sums.
Tries to sum up duplicate operators and reduces the operators
in the sum.
Returns:
A collapsed version of self, if possible.
"""
# reduce constituents
reduced_ops = sum(op.reduce() for op in self.oplist) * self.coeff

# group duplicate operators
if isinstance(reduced_ops, SummedOp):
reduced_ops = reduced_ops.collapse_summands()

if isinstance(reduced_ops, SummedOp) and len(reduced_ops.oplist) == 1:
return reduced_ops.oplist[0]
else:
@@ -142,3 +150,49 @@ def to_legacy_op(self, massive: bool = False) -> LegacyBaseOperator:
coeff = cast(float, self.coeff)

return self.combo_fn(legacy_ops) * coeff

+def equals(self, other: OperatorBase) -> bool:
+    """Check if other is equal to self.
+
+    Note:
+        This is not a mathematical check for equality.
+        If ``self`` and ``other`` implement the same operation but differ
+        in the representation (e.g. different type of summands)
+        ``equals`` will evaluate to ``False``.
+
+    Args:
+        other: The other operator to check for equality.
+
+    Returns:
+        True, if other and self are equal, otherwise False.
+
+    Examples:
+        >>> from qiskit.aqua.operators import X, Z
+        >>> 2 * X == X + X
+        True
+        >>> X + Z == Z + X
+        True
+    """
+    self_reduced, other_reduced = self.reduce(), other.reduce()
+    if not isinstance(other_reduced, type(self_reduced)):
+        return False
+
+    # check if reduced op is still a SummedOp
+    if not isinstance(self_reduced, SummedOp):
+        return self_reduced == other_reduced
+
+    self_reduced = cast(SummedOp, self_reduced)
+    other_reduced = cast(SummedOp, other_reduced)
+    if len(self_reduced.oplist) != len(other_reduced.oplist):
+        return False
+
+    # absorb coeffs into the operators
+    if self_reduced.coeff != 1:
+        self_reduced = SummedOp(
+            [op * self_reduced.coeff for op in self_reduced.oplist])  # type: ignore
+    if other_reduced.coeff != 1:
+        other_reduced = SummedOp(
+            [op * other_reduced.coeff for op in other_reduced.oplist])  # type: ignore
+
+    # compare independent of order
+    return set(self_reduced) == set(other_reduced)
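Taken together: collapse_summands merges duplicate summands, reduce additionally unwraps single-element sums, and equals compares the reduced forms irrespective of summand order. The doctest examples above, plus one collapse case, as a sketch:

```python
from qiskit.aqua.operators import X, Z, SummedOp

# Duplicate summands merge into a single weighted term:
SummedOp([2 * (X ^ Z), X ^ Z]).collapse_summands()   # -> SummedOp([3 * (X ^ Z)])

# Reduction-based, order-independent equality (see the docstring above):
assert 2 * X == X + X
assert X + Z == Z + X
```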
3 changes: 2 additions & 1 deletion qiskit/aqua/operators/primitive_ops/circuit_op.py
@@ -223,7 +223,8 @@ def reduce(self) -> OperatorBase:
# Check if Identity or empty instruction (need to check that type is exactly
# Instruction because some gates have lazy gate.definition population)
# pylint: disable=unidiomatic-typecheck
-if isinstance(gate, IGate) or (type(gate) == Instruction and gate.definition == []):
+if isinstance(gate, IGate) or (type(gate) == Instruction and
+                               gate.definition.data == []):
del self.primitive.data[i] # type: ignore
return self

4 changes: 0 additions & 4 deletions qiskit/aqua/operators/primitive_ops/pauli_op.py
@@ -231,10 +231,6 @@ def exp_i(self) -> OperatorBase:
from ..evolutions.evolved_op import EvolvedOp
return EvolvedOp(self)

-def __hash__(self) -> int:
-    # Need this to be able to easily construct AbelianGraphs
-    return hash(str(self))

def commutes(self, other_op: OperatorBase) -> bool:
""" Returns whether self commutes with other_op.
3 changes: 3 additions & 0 deletions qiskit/aqua/operators/primitive_ops/primitive_op.py
@@ -198,6 +198,9 @@ def log_i(self, massive: bool = False) -> OperatorBase:
def __str__(self) -> str:
raise NotImplementedError

+def __hash__(self) -> int:
+    return hash(repr(self))

def __repr__(self) -> str:
return "{}({}, coeff={})".format(type(self).__name__, repr(self.primitive), self.coeff)

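Hoisting __hash__ from PauliOp up to the PrimitiveOp base (now keyed on repr rather than str) makes every primitive operator hashable, which is what the set comparison in SummedOp.equals relies on. A small sketch:

```python
from qiskit.aqua.operators import X, Z

assert hash(X) == hash(X)     # stable: hash(repr(op))
assert {X, Z, X} == {Z, X}    # duplicates collapse via __hash__ and __eq__
```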
5 changes: 3 additions & 2 deletions qiskit/aqua/operators/state_fns/circuit_state_fn.py
@@ -272,7 +272,7 @@ def assign_parameters(self, param_dict: dict) -> OperatorBase:
param_instersection = set(unrolled_dict.keys()) & self.primitive.parameters
binds = {param: unrolled_dict[param] for param in param_instersection}
qc = self.to_circuit().assign_parameters(binds)
-return self.__class__(qc, coeff=param_value)
+return self.__class__(qc, coeff=param_value, is_measurement=self.is_measurement)

def eval(self,
front: Union[str, dict, np.ndarray,
@@ -349,7 +349,8 @@ def reduce(self) -> OperatorBase:
# Check if Identity or empty instruction (need to check that type is exactly
# Instruction because some gates have lazy gate.definition population)
# pylint: disable=unidiomatic-typecheck
-if isinstance(gate, IGate) or (type(gate) == Instruction and gate.definition == []):
+if isinstance(gate, IGate) or (type(gate) == Instruction and
+                               gate.definition.data == []):
del self.primitive.data[i]
return self

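The assign_parameters fix keeps the measurement flag intact through binding; previously a bound measurement came back as a plain state function. A minimal sketch:

```python
from qiskit.circuit import Parameter, QuantumCircuit
from qiskit.aqua.operators import CircuitStateFn

theta = Parameter('theta')
qc = QuantumCircuit(1)
qc.ry(theta, 0)

measurement = CircuitStateFn(qc, is_measurement=True)
bound = measurement.assign_parameters({theta: 0.5})
assert bound.is_measurement   # preserved by passing is_measurement through
```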