diff --git a/moe/bandit/bandit_interface.py b/moe/bandit/bandit_interface.py
index 0ed6c3e2..d703fa83 100644
--- a/moe/bandit/bandit_interface.py
+++ b/moe/bandit/bandit_interface.py
@@ -5,7 +5,7 @@
 from abc import ABCMeta, abstractmethod


-class BanditInterface(object):
+class BanditInterface(object, metaclass=ABCMeta):

     r"""Interface for a bandit algorithm.

@@ -16,8 +16,6 @@ class BanditInterface(object):

     """

-    __metaclass__ = ABCMeta
-
     @abstractmethod
     def allocate_arms(self):
         r"""Compute the allocation to each arm given ``historical_info``, running bandit ``subtype`` endpoint with hyperparameters in ``hyperparameter_info``.
@@ -46,9 +44,9 @@ def choose_arm(arms_to_allocations):
     if not arms_to_allocations:
         raise ValueError('arms_to_allocations is empty!')

-    allocations = numpy.array(arms_to_allocations.values())
+    allocations = numpy.array(list(arms_to_allocations.values()))
     # The winning arm is chosen based on the distribution of arm allocations.
     winner = numpy.argmax(numpy.random.dirichlet(allocations))
     # While the internal order of a dict is unknowable a priori, the order presented by the various iterators
     # and list-ify methods is always the same as long as the dict is not modified between calls to these methods.
-    return arms_to_allocations.keys()[winner]
+    return list(arms_to_allocations.keys())[winner]
diff --git a/moe/bandit/bla/bla.py b/moe/bandit/bla/bla.py
index 799d693a..20cd469a 100644
--- a/moe/bandit/bla/bla.py
+++ b/moe/bandit/bla/bla.py
@@ -44,7 +44,7 @@ def __init__(
         self._historical_info = copy.deepcopy(historical_info)
         self._subtype = subtype
         # Validate that every arm is a Bernoulli arm.
-        for arm in self._historical_info.arms_sampled.itervalues():
+        for arm in self._historical_info.arms_sampled.values():
             if not isinstance(arm, BernoulliArm):
                 raise ValueError('All arms have to be Bernoulli arms!')
@@ -122,5 +122,5 @@ def get_winning_arm_names(self, arms_sampled):
         if not arms_sampled:
             raise ValueError('arms_sampled is empty!')

-        bla_payoff_arm_name_list = [(self.get_bla_payoff(sampled_arm), arm_name) for arm_name, sampled_arm in arms_sampled.iteritems()]
+        bla_payoff_arm_name_list = [(self.get_bla_payoff(sampled_arm), arm_name) for arm_name, sampled_arm in arms_sampled.items()]
         return get_winning_arm_names_from_payoff_arm_name_list(bla_payoff_arm_name_list)
diff --git a/moe/bandit/data_containers.py b/moe/bandit/data_containers.py
index 3dea9e7f..f6222fa0 100644
--- a/moe/bandit/data_containers.py
+++ b/moe/bandit/data_containers.py
@@ -202,7 +202,7 @@ def __str__(self, pretty_print=True):
     def json_payload(self):
         """Construct a json serializable and MOE REST recognizable dictionary of the historical data."""
         json_arms_sampled = {}
-        for name, arm in self._arms_sampled.iteritems():
+        for name, arm in self._arms_sampled.items():
             json_arms_sampled[name] = arm.json_payload()
         return {'arms_sampled': json_arms_sampled}
@@ -217,7 +217,7 @@ def validate_sample_arms(sample_arms):
         """
         if sample_arms:
-            for arm in sample_arms.itervalues():
+            for arm in sample_arms.values():
                 arm.validate()

     def append_sample_arms(self, sample_arms, validate=True):
@@ -246,7 +246,7 @@ def _update_historical_data(self, sample_arms):
         :param sample_arms: the already-sampled arms: wins, losses, and totals
         :type sample_arms: dictionary of (arm name, SampleArm) key-value pairs
         """
-        for name, arm in sample_arms.iteritems():
+        for name, arm in sample_arms.items():
             if name in self._arms_sampled:
                 self._arms_sampled[name] += arm
             else:
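A note on the pattern above: in Python 3, ``dict.keys()``, ``.values()``, and ``.items()`` return lazy view objects rather than lists, so any code that indexes the result or hands it to numpy must materialize a list first. A minimal sketch of the ``choose_arm()`` logic with hypothetical allocations:

    import numpy

    arms_to_allocations = {'arm_a': 3.0, 'arm_b': 1.0}  # hypothetical Dirichlet parameters
    # numpy.array(d.values()) would build a useless 0-d object array in Python 3;
    # wrapping the view in list() recovers the Python 2 behavior.
    allocations = numpy.array(list(arms_to_allocations.values()))
    winner = numpy.argmax(numpy.random.dirichlet(allocations))
    # keys() and values() iterate in the same order while the dict is unmodified,
    # so the winning index maps back to an arm name.
    chosen_arm = list(arms_to_allocations.keys())[winner]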
diff --git a/moe/bandit/epsilon/epsilon_first.py b/moe/bandit/epsilon/epsilon_first.py
index 1b96d56a..d0923598 100644
--- a/moe/bandit/epsilon/epsilon_first.py
+++ b/moe/bandit/epsilon/epsilon_first.py
@@ -95,7 +95,7 @@ def allocate_arms(self):
         if not arms_sampled:
             raise ValueError('sample_arms is empty!')

-        num_sampled = sum([sampled_arm.total for sampled_arm in arms_sampled.itervalues()])
+        num_sampled = sum([sampled_arm.total for sampled_arm in arms_sampled.values()])
         # Exploration phase, trials 1,2,..., epsilon * T
         # Allocate equal probability to all arms
         if num_sampled < self._total_samples * self._epsilon:
diff --git a/moe/bandit/epsilon/epsilon_greedy.py b/moe/bandit/epsilon/epsilon_greedy.py
index b27f8205..d043dc17 100644
--- a/moe/bandit/epsilon/epsilon_greedy.py
+++ b/moe/bandit/epsilon/epsilon_greedy.py
@@ -75,7 +75,7 @@ def allocate_arms(self):
         arms_to_allocations = {}
         # With probability epsilon, choose a winning arm at random. Therefore, we split the allocation epsilon among all arms.
-        for arm_name in arms_sampled.iterkeys():
+        for arm_name in arms_sampled.keys():
             arms_to_allocations[arm_name] = epsilon_allocation

         # With probability 1-epsilon, split allocation among winning arms.
diff --git a/moe/bandit/epsilon/epsilon_interface.py b/moe/bandit/epsilon/epsilon_interface.py
index 40862264..739d9da6 100644
--- a/moe/bandit/epsilon/epsilon_interface.py
+++ b/moe/bandit/epsilon/epsilon_interface.py
@@ -62,7 +62,7 @@ def get_winning_arm_names(arms_sampled):
             raise ValueError('arms_sampled is empty!')

         avg_payoff_arm_name_list = []
-        for arm_name, sampled_arm in arms_sampled.iteritems():
+        for arm_name, sampled_arm in arms_sampled.items():
             avg_payoff = numpy.float64(sampled_arm.win - sampled_arm.loss) / sampled_arm.total if sampled_arm.total > 0 else 0
             avg_payoff_arm_name_list.append((avg_payoff, arm_name))
diff --git a/moe/bandit/ucb/ucb_interface.py b/moe/bandit/ucb/ucb_interface.py
index 81411abc..0fc5a758 100644
--- a/moe/bandit/ucb/ucb_interface.py
+++ b/moe/bandit/ucb/ucb_interface.py
@@ -58,7 +58,7 @@ def get_unsampled_arm_names(arms_sampled):
         if not arms_sampled:
             raise ValueError('arms_sampled is empty!')

-        unsampled_arm_name_list = [name for name, sampled_arm in arms_sampled.iteritems() if sampled_arm.total == 0]
+        unsampled_arm_name_list = [name for name, sampled_arm in arms_sampled.items() if sampled_arm.total == 0]
         return frozenset(unsampled_arm_name_list)

     @abstractmethod
@@ -131,8 +131,8 @@ def get_winning_arm_names(self, arms_sampled):
         if unsampled_arm_names:
             return unsampled_arm_names

-        number_sampled = sum([sampled_arm.total for sampled_arm in arms_sampled.itervalues()])
+        number_sampled = sum([sampled_arm.total for sampled_arm in arms_sampled.values()])

-        ucb_payoff_arm_name_list = [(self.get_ucb_payoff(sampled_arm, number_sampled), arm_name) for arm_name, sampled_arm in arms_sampled.iteritems()]
+        ucb_payoff_arm_name_list = [(self.get_ucb_payoff(sampled_arm, number_sampled), arm_name) for arm_name, sampled_arm in arms_sampled.items()]
         return get_winning_arm_names_from_payoff_arm_name_list(ucb_payoff_arm_name_list)
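For the epsilon-greedy change above, the allocation arithmetic is easiest to see with concrete numbers (hypothetical values; ``epsilon_allocation`` is assumed here to be ``epsilon / num_arms``, matching the comment in the surrounding code):

    # Hypothetical: epsilon = 0.2, four arms sampled, one winning arm 'arm_a'.
    epsilon = 0.2
    arms = ['arm_a', 'arm_b', 'arm_c', 'arm_d']
    epsilon_allocation = epsilon / len(arms)             # 0.05 exploration share per arm
    allocations = {name: epsilon_allocation for name in arms}
    allocations['arm_a'] += (1.0 - epsilon) / 1          # winners split the 0.8 exploitation share
    assert abs(sum(allocations.values()) - 1.0) < 1e-12  # {'arm_a': 0.85, others: 0.05}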
diff --git a/moe/bandit/utils.py b/moe/bandit/utils.py
index 56a57dea..75c21e8f 100644
--- a/moe/bandit/utils.py
+++ b/moe/bandit/utils.py
@@ -20,9 +20,9 @@ def get_winning_arm_names_from_payoff_arm_name_list(payoff_arm_name_list):
     best_payoff, _ = max(payoff_arm_name_list)
     # Filter out arms that have payoff less than the best payoff
-    winning_arm_payoff_name_list = filter(lambda payoff_arm_name: payoff_arm_name[0] == best_payoff, payoff_arm_name_list)
+    winning_arm_payoff_name_list = [payoff_arm_name for payoff_arm_name in payoff_arm_name_list if payoff_arm_name[0] == best_payoff]
     # Extract a list of winning arm names from a list of (payoff, arm name) tuples.
-    _, winning_arm_name_list = map(list, zip(*winning_arm_payoff_name_list))
+    _, winning_arm_name_list = list(map(list, list(zip(*winning_arm_payoff_name_list))))
     winning_arm_names = frozenset(winning_arm_name_list)
     return winning_arm_names
@@ -46,14 +46,14 @@ def get_equal_arm_allocations(arms_sampled, winning_arm_names=None):
     # If no ``winning_arm_names`` given, split allocations among ``arms_sampled``.
     if winning_arm_names is None:
-        winning_arm_names = frozenset([arm_name for arm_name in arms_sampled.iterkeys()])
+        winning_arm_names = frozenset([arm_name for arm_name in arms_sampled.keys()])

     num_winning_arms = len(winning_arm_names)
     arms_to_allocations = {}

     winning_arm_allocation = 1.0 / num_winning_arms
     # Split allocation among winning arms, all other arms get allocation of 0.
-    for arm_name in arms_sampled.iterkeys():
+    for arm_name in arms_sampled.keys():
         arms_to_allocations[arm_name] = winning_arm_allocation if arm_name in winning_arm_names else 0.0

     return arms_to_allocations
diff --git a/moe/easy_interface/simple_endpoint.py b/moe/easy_interface/simple_endpoint.py
index 0972e1b1..3f5e3182 100644
--- a/moe/easy_interface/simple_endpoint.py
+++ b/moe/easy_interface/simple_endpoint.py
@@ -1,7 +1,7 @@
 # -*- coding: utf-8 -*-
 """Simple functions for hitting the REST endpoints of a MOE service."""
 import contextlib
-import urllib2
+import urllib.request, urllib.error, urllib.parse

 import simplejson as json
@@ -20,8 +20,8 @@ def call_endpoint_with_payload(rest_host, rest_port, endpoint, json_payload, testapp=None):
     """Send a POST request to a ``url`` with a given ``json_payload``, return the response as a dict."""
     if testapp is None:
         url = "http://{0}:{1:d}{2}".format(rest_host, rest_port, endpoint)
-        request = urllib2.Request(url, json_payload, {'Content-Type': 'application/json'})
-        with contextlib.closing(urllib2.urlopen(request)) as f:
+        request = urllib.request.Request(url, json_payload.encode('utf-8'), {'Content-Type': 'application/json'})
+        with contextlib.closing(urllib.request.urlopen(request)) as f:
             response = f.read()
     else:
         response = testapp.post(endpoint, json_payload).body
diff --git a/moe/optimal_learning/python/comparison.py b/moe/optimal_learning/python/comparison.py
index 0e1d9248..19f116dc 100644
--- a/moe/optimal_learning/python/comparison.py
+++ b/moe/optimal_learning/python/comparison.py
@@ -86,7 +86,7 @@ def __repr__(self):
         """
         return '{0:s}({1:s})'.format(
             self.__class__.__name__,
-            ', '.join(map(lambda pair: '{0}={1}'.format(pair[0], pair[1]), self._get_comparable_members())),
+            ', '.join(['{0}={1}'.format(pair[0], pair[1]) for pair in self._get_comparable_members()]),
         )

     def __eq__(self, other):
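The urllib2 to urllib.request move in simple_endpoint.py above carries one behavioral trap: in Python 3, ``urlopen()`` requires the POST body to be bytes, not str, so the converted ``Request`` line is written here with an explicit encode (this assumes ``json_payload`` is the str produced by ``json.dumps``). A standalone sketch with an illustrative URL and payload:

    import json
    import urllib.request

    json_payload = json.dumps({'example': 'payload'})  # str in Python 3
    request = urllib.request.Request(
        'http://localhost:6543/endpoint',              # hypothetical MOE endpoint
        json_payload.encode('utf-8'),                  # urlopen raises TypeError on a str body
        {'Content-Type': 'application/json'},
    )
    # response = urllib.request.urlopen(request).read()  # network call, shown inert here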
diff --git a/moe/optimal_learning/python/data_containers.py b/moe/optimal_learning/python/data_containers.py
index 48bdfd28..c3f8da16 100644
--- a/moe/optimal_learning/python/data_containers.py
+++ b/moe/optimal_learning/python/data_containers.py
@@ -199,7 +199,7 @@ def validate_historical_data(dim, points_sampled, points_sampled_value, points_sampled_noise_variance):
         raise ValueError('Input arrays do not have the same leading dimension: (points_sampled, value, noise) = ({0:d}, {1:d}, {2:d})'.format(points_sampled.shape[0], points_sampled_value.size, points_sampled_noise_variance.size))

     if points_sampled.shape[0] > 0:
-        for i in xrange(points_sampled.shape[0]):
+        for i in range(points_sampled.shape[0]):
             temp = SamplePoint(points_sampled[i], points_sampled_value[i], points_sampled_noise_variance[i])
             temp.validate(dim=dim)
@@ -262,7 +262,7 @@ def to_list_of_sample_points(self):
         """
         return [SamplePoint(numpy.copy(self._points_sampled[i]),
                             self._points_sampled_value[i],
                             noise_variance=self._points_sampled_noise_variance[i])
-                for i in xrange(self.num_sampled)]
+                for i in range(self.num_sampled)]

     def _update_historical_data(self, offset, sample_points):
         """Copy (in "transposed" order) data from ``sample_points`` into this object's data members, starting at index ``offset``.
diff --git a/moe/optimal_learning/python/geometry_utils.py b/moe/optimal_learning/python/geometry_utils.py
index d1787f5e..f00a545b 100644
--- a/moe/optimal_learning/python/geometry_utils.py
+++ b/moe/optimal_learning/python/geometry_utils.py
@@ -36,7 +36,7 @@ def generate_latin_hypercube_points(num_points, domain_bounds):
         ordering = numpy.arange(num_points)
         numpy.random.shuffle(ordering)
-        for j in xrange(num_points):
+        for j in range(num_points):
             point_base = interval.min + subcube_edge_length * ordering[j]
             points[j, i] = point_base + numpy.random.uniform(0.0, subcube_edge_length)
@@ -82,7 +82,7 @@ def generate_grid_points(points_per_dimension, domain_bounds):
     # vstack stacks inputs vertically: so for our 1D arrays, the i-th input becomes
     # the i-th row in a matrix. And since each mesh_grid output has *every* coordinate
     # of the grid in that dimension, the *columns* of the stack contain every grid point.
-    return numpy.vstack(map(numpy.ravel, mesh_grid)).T
+    return numpy.vstack(list(map(numpy.ravel, mesh_grid))).T


# See ClosedInterval (below) for docstring.
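Several changes above (the ``filter()`` removal and zip/map wrapping in moe/bandit/utils.py, and ``numpy.vstack(list(map(...)))`` in geometry_utils.py) trace back to the same Python 3 fact: ``map()``, ``zip()``, and ``filter()`` now return single-pass iterators. Tuple unpacking can consume an iterator directly, but indexing, ``len()``, or a second pass cannot. A small sketch with hypothetical (payoff, name) pairs:

    pairs = [(0.9, 'arm_a'), (0.9, 'arm_b'), (0.5, 'arm_c')]
    best_payoff, _ = max(pairs)
    # filter() returns an iterator in Python 3; a list comprehension is the idiomatic swap.
    winners = [pair for pair in pairs if pair[0] == best_payoff]
    # Unpacking consumes the zip/map iterators directly; the extra list() calls in the
    # diff are equivalent, just defensive.
    payoffs, names = map(list, zip(*winners))
    assert names == ['arm_a', 'arm_b']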
diff --git a/moe/optimal_learning/python/interfaces/covariance_interface.py b/moe/optimal_learning/python/interfaces/covariance_interface.py
index 529fcd63..2fcdcdea 100644
--- a/moe/optimal_learning/python/interfaces/covariance_interface.py
+++ b/moe/optimal_learning/python/interfaces/covariance_interface.py
@@ -27,7 +27,7 @@
 from abc import ABCMeta, abstractmethod, abstractproperty


-class CovarianceInterface(object):
+class CovarianceInterface(object, metaclass=ABCMeta):

     r"""Interface for a covariance function: covariance of two points and spatial/hyperparameter derivatives.
@@ -48,8 +48,6 @@ class CovarianceInterface(object):

     """

-    __metaclass__ = ABCMeta
-
     @abstractproperty
     def num_hyperparameters(self):
         """Return the number of hyperparameters of this covariance function."""
diff --git a/moe/optimal_learning/python/interfaces/domain_interface.py b/moe/optimal_learning/python/interfaces/domain_interface.py
index b06ed693..c87555d2 100644
--- a/moe/optimal_learning/python/interfaces/domain_interface.py
+++ b/moe/optimal_learning/python/interfaces/domain_interface.py
@@ -3,12 +3,10 @@
 from abc import ABCMeta, abstractmethod, abstractproperty


-class DomainInterface(object):
+class DomainInterface(object, metaclass=ABCMeta):

     """Interface for a domain: in/out test, random point generation, and update limiting (for constrained optimization)."""

-    __metaclass__ = ABCMeta
-
     @abstractproperty
     def dim(self):
         """Return the number of spatial dimensions."""
diff --git a/moe/optimal_learning/python/interfaces/expected_improvement_interface.py b/moe/optimal_learning/python/interfaces/expected_improvement_interface.py
index 7861f731..f73f431e 100644
--- a/moe/optimal_learning/python/interfaces/expected_improvement_interface.py
+++ b/moe/optimal_learning/python/interfaces/expected_improvement_interface.py
@@ -30,7 +30,7 @@
 from abc import ABCMeta, abstractmethod, abstractproperty


-class ExpectedImprovementInterface(object):
+class ExpectedImprovementInterface(object, metaclass=ABCMeta):

     r"""Interface for Expected Improvement computation: EI and its gradient at specified point(s) sampled from a GaussianProcess.
@@ -50,8 +50,6 @@ class ExpectedImprovementInterface(object):

     """

-    __metaclass__ = ABCMeta
-
     @abstractproperty
     def dim(self):
         """Return the number of spatial dimensions."""
diff --git a/moe/optimal_learning/python/interfaces/gaussian_process_interface.py b/moe/optimal_learning/python/interfaces/gaussian_process_interface.py
index 8ea1cc95..01905d09 100644
--- a/moe/optimal_learning/python/interfaces/gaussian_process_interface.py
+++ b/moe/optimal_learning/python/interfaces/gaussian_process_interface.py
@@ -14,7 +14,7 @@
 from abc import ABCMeta, abstractmethod, abstractproperty


-class GaussianProcessDataInterface(object):
+class GaussianProcessDataInterface(object, metaclass=ABCMeta):

     """Core data interface for constructing or manipulating a Gaussian Process Prior (GPP).
@@ -34,8 +34,6 @@ class GaussianProcessDataInterface(object):

     """

-    __metaclass__ = ABCMeta
-
     @abstractmethod
     def get_covariance_copy(self):
         """Return a copy of the covariance object specifying the Gaussian Process.
@@ -61,7 +59,7 @@ def get_core_data_copy(self):
         return self.get_covariance_copy(), self.get_historical_data_copy()


-class GaussianProcessInterface(GaussianProcessDataInterface):
+class GaussianProcessInterface(GaussianProcessDataInterface, metaclass=ABCMeta):

     r"""Interface for a GaussianProcess: mean, variance, gradients thereof, and data I/O.
@@ -101,8 +99,6 @@ class GaussianProcessInterface(GaussianProcessDataInterface):

     """

-    __metaclass__ = ABCMeta
-
     @staticmethod
     def _clamp_num_derivatives(num_points, num_derivatives):
         """Clamp num_derivatives so that the result is 0 <= result <= num_points; negative num_derivatives yields num_points.
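These interface conversions (continuing through the log-likelihood and optimization interfaces below) are the one Python 2 to 3 change in this diff that fails silently rather than loudly: a class-level ``__metaclass__`` attribute is simply ignored by Python 3, so the ABCs would quietly stop rejecting incomplete subclasses. The ``metaclass=`` keyword restores that; ``object`` in the base list is redundant in Python 3 but harmless. A minimal sketch:

    from abc import ABCMeta, abstractmethod

    class Interface(metaclass=ABCMeta):  # Python 2 spelling: __metaclass__ = ABCMeta
        @abstractmethod
        def allocate_arms(self):
            raise NotImplementedError

    try:
        Interface()
    except TypeError as err:  # abstract classes cannot be instantiated
        print(err)            # "Can't instantiate abstract class Interface ..."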
diff --git a/moe/optimal_learning/python/interfaces/log_likelihood_interface.py b/moe/optimal_learning/python/interfaces/log_likelihood_interface.py
index 9ee0ce4f..37efe7fa 100644
--- a/moe/optimal_learning/python/interfaces/log_likelihood_interface.py
+++ b/moe/optimal_learning/python/interfaces/log_likelihood_interface.py
@@ -89,7 +89,7 @@
 from moe.optimal_learning.python.interfaces.gaussian_process_interface import GaussianProcessDataInterface


-class GaussianProcessLogLikelihoodInterface(GaussianProcessDataInterface):
+class GaussianProcessLogLikelihoodInterface(GaussianProcessDataInterface, metaclass=ABCMeta):

     r"""Interface for computation of log likelihood (and log likelihood-like) measures of model fit along with its gradient and hessian.
@@ -103,8 +103,6 @@ class GaussianProcessLogLikelihoodInterface(GaussianProcessDataInterface):

     """

-    __metaclass__ = ABCMeta
-
     @abstractproperty
     def dim(self):
         """Return the number of spatial dimensions."""
diff --git a/moe/optimal_learning/python/interfaces/optimization_interface.py b/moe/optimal_learning/python/interfaces/optimization_interface.py
index 47899ef7..c2532172 100644
--- a/moe/optimal_learning/python/interfaces/optimization_interface.py
+++ b/moe/optimal_learning/python/interfaces/optimization_interface.py
@@ -12,7 +12,7 @@
 from abc import ABCMeta, abstractmethod, abstractproperty


-class OptimizableInterface(object):
+class OptimizableInterface(object, metaclass=ABCMeta):

     r"""Interface that an object must fulfill to be optimized by an implementation of OptimizerInterface.
@@ -37,8 +37,6 @@ class OptimizableInterface(object):

     """

-    __metaclass__ = ABCMeta
-
     @abstractproperty
     def problem_size(self):
         """Return the number of independent parameters to optimize."""
@@ -93,7 +91,7 @@ def compute_hessian_objective_function(self, **kwargs):
         pass


-class OptimizerInterface(object):
+class OptimizerInterface(object, metaclass=ABCMeta):

     r"""Interface to *maximize* any object implementing OptimizableInterface (defined above).
@@ -106,8 +104,6 @@ class OptimizerInterface(object):

     """

-    __metaclass__ = ABCMeta
-
     @abstractmethod
     def optimize(self, **kwargs):
         r"""Maximize a function f(x), represented by an implementation of OptimizableInterface.
diff --git a/moe/optimal_learning/python/python_version/expected_improvement.py b/moe/optimal_learning/python/python_version/expected_improvement.py
index daf87f23..6e695745 100644
--- a/moe/optimal_learning/python/python_version/expected_improvement.py
+++ b/moe/optimal_learning/python/python_version/expected_improvement.py
@@ -364,7 +364,7 @@ def multivar_norm_cdf(upper, cov_matrix):
             sum_term += cov_k[0, k] * singlevar_norm_pdf(m_k[0], cov_k[0, 0], b_k[0])
         else:
             for i in range(0, num_points):
-                index_no_i = range(0, i) + range(i + 1, num_points)
+                index_no_i = list(range(0, i)) + list(range(i + 1, num_points))

                 # c_k introduced on top of page 4
                 c_k = (b_k - m_k) - (b_k[i] - m_k[i]) * cov_k[i, :] / cov_k[i, i]
@@ -595,7 +595,7 @@ def _compute_grad_expected_improvement_monte_carlo_naive(self, mu_star, var_star, grad_mu, grad_chol_decomp):
             winner = numpy.argmax(improvements_this_iter)
             if winner < self.num_to_sample:
                 aggregate_dx[winner, ...] -= grad_mu[winner, ...]
-            for diff_index in xrange(self.num_to_sample):
+            for diff_index in range(self.num_to_sample):
                 # grad_chol_decomp_{diff_index, winner, i, j} * normal_draws_{i}
                 aggregate_dx[diff_index, ...] -= numpy.dot(grad_chol_decomp[diff_index, winner, ...].T, normal_draws)
@@ -700,9 +700,9 @@ def _compute_grad_expected_improvement_monte_carlo(self, mu_star, var_star, grad_mu, grad_chol_decomp):
         # will never be very large.
         # This tradeoff may change when GH-60 is done.
         grad_chol_decomp_tiled = numpy.empty((normals_compressed.shape[0], grad_chol_decomp.shape[2], grad_chol_decomp.shape[3]))
-        for diff_index in xrange(self.num_to_sample):
+        for diff_index in range(self.num_to_sample):
             grad_chol_decomp_tiled[...] = 0.0
-            for i in xrange(num_points):
+            for i in range(num_points):
                 # Only track the iterations where point i had the best improvement (winner)
                 winner_indexes_equal_to_i = winner_indexes_tiled_equal_to_diff_index[i, ...]
diff --git a/moe/optimal_learning/python/python_version/gaussian_process.py b/moe/optimal_learning/python/python_version/gaussian_process.py
index c6708027..19838017 100644
--- a/moe/optimal_learning/python/python_version/gaussian_process.py
+++ b/moe/optimal_learning/python/python_version/gaussian_process.py
@@ -308,7 +308,7 @@ def compute_grad_variance_of_points(self, points_to_sample, num_derivatives=-1):
         """
         num_derivatives = self._clamp_num_derivatives(points_to_sample.shape[0], num_derivatives)
         grad_var = numpy.empty((num_derivatives, points_to_sample.shape[0], points_to_sample.shape[0], self.dim))
-        for i in xrange(num_derivatives):
+        for i in range(num_derivatives):
             grad_var[i, ...] = self._compute_grad_variance_of_points_per_point(points_to_sample, i)

         return grad_var
@@ -336,20 +336,20 @@ def _compute_grad_cholesky_variance_of_points_per_point(self, points_to_sample, chol_var, var_of_grad):
         # Compute grad cholesky
         # Zero out the upper half of the matrix
         grad_chol = self._compute_grad_variance_of_points_per_point(points_to_sample, var_of_grad)
-        for i in xrange(num_to_sample):
-            for j in xrange(num_to_sample):
+        for i in range(num_to_sample):
+            for j in range(num_to_sample):
                 if i < j:
                     grad_chol[i, j, ...] = numpy.zeros(self.dim)

         # Step 2 of Appendix 2
-        for k in xrange(num_to_sample):
+        for k in range(num_to_sample):
             L_kk = chol_var[k, k]
             if L_kk > MINIMUM_STD_DEV_GRAD_CHOLESKY:
                 grad_chol[k, k, ...] *= 0.5 / L_kk
-                for j in xrange(k + 1, num_to_sample):
+                for j in range(k + 1, num_to_sample):
                     grad_chol[j, k, ...] = (grad_chol[j, k, ...] - chol_var[j, k] * grad_chol[k, k, ...]) / L_kk
-                for j in xrange(k + 1, num_to_sample):
-                    for i in xrange(j, num_to_sample):
+                for j in range(k + 1, num_to_sample):
+                    for i in range(j, num_to_sample):
                         grad_chol[i, j, ...] += -grad_chol[i, k, ...] * chol_var[j, k] - chol_var[i, k] * grad_chol[j, k, ...]

         return grad_chol
@@ -394,7 +394,7 @@ def compute_grad_cholesky_variance_of_points(self, points_to_sample, chol_var=None, num_derivatives=-1):
         chol_var = scipy.linalg.cho_factor(var_star, lower=True, overwrite_a=True)[0]

         grad_chol_decomp = numpy.empty((num_derivatives, points_to_sample.shape[0], points_to_sample.shape[0], self.dim))
-        for i in xrange(num_derivatives):
+        for i in range(num_derivatives):
             grad_chol_decomp[i, ...] = self._compute_grad_cholesky_variance_of_points_per_point(points_to_sample, chol_var, i)

         return grad_chol_decomp
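The xrange to range substitutions above are mechanical because Python 3's ``range()`` is itself a lazy sequence; the only construct needing more care is concatenation, as in the ``index_no_i`` line of expected_improvement.py, since range objects do not support ``+``:

    num_points, i = 5, 2
    # range supports len(), indexing, and membership tests without materialization...
    assert len(range(num_points)) == 5 and 3 in range(num_points)
    # ...but not concatenation: range(0, i) + range(i + 1, num_points) is a TypeError.
    index_no_i = list(range(0, i)) + list(range(i + 1, num_points))
    assert index_no_i == [0, 1, 3, 4]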
diff --git a/moe/optimal_learning/python/python_version/log_likelihood.py b/moe/optimal_learning/python/python_version/log_likelihood.py
index 324fa03f..06b60a90 100644
--- a/moe/optimal_learning/python/python_version/log_likelihood.py
+++ b/moe/optimal_learning/python/python_version/log_likelihood.py
@@ -331,7 +331,7 @@ def compute_grad_log_likelihood(self):
             self._points_sampled,
         )
         grad_log_marginal = numpy.empty(self.num_hyperparameters)
-        for k in xrange(self.num_hyperparameters):
+        for k in range(self.num_hyperparameters):
             grad_cov_block = grad_hyperparameter_cov_matrix[..., k]
             # computing 0.5 * \alpha^T * grad_hyperparameter_cov_matrix * \alpha, where \alpha = K^-1 * y (aka K_inv_y)
             # temp_vec := grad_hyperparameter_cov_matrix * K_inv_y
diff --git a/moe/optimal_learning/python/repeated_domain.py b/moe/optimal_learning/python/repeated_domain.py
index 638cd2b5..4a91871d 100644
--- a/moe/optimal_learning/python/repeated_domain.py
+++ b/moe/optimal_learning/python/repeated_domain.py
@@ -90,7 +90,7 @@ def get_constraint_list(self):
         """
         constraints = []
-        for i in xrange(self.num_repeats):
+        for i in range(self.num_repeats):
             # Using start_index, start each domain at the correct index when flattening out points in COBYLA.
             constraints.extend(self._domain.get_constraint_list(start_index=self.dim * i))
         return constraints
@@ -109,7 +109,7 @@ def generate_random_point_in_domain(self, random_source=None):
         """
         return numpy.array([self._domain.generate_random_point_in_domain(random_source=random_source)
-                            for _ in xrange(self.num_repeats)])
+                            for _ in range(self.num_repeats)])

     def generate_uniform_random_points_in_domain(self, num_points, random_source=None):
         r"""Generate AT MOST ``num_points`` uniformly distributed points from the domain.
@@ -139,7 +139,7 @@ def generate_uniform_random_points_in_domain(self, num_points, random_source=None):
         # Then we "transpose" the output ordering: the i-th point in RepeatedDomain is constructed
         # from the i-th points of LHC_1 ... LHC_{num_repeats}
         num_points_array = numpy.empty(self.num_repeats, dtype=numpy.int64)
-        for i in xrange(self.num_repeats):
+        for i in range(self.num_repeats):
             temp = self._domain.generate_uniform_random_points_in_domain(num_points, random_source=random_source)
             # Since generate_uniform_random_points_in_domain() may not always return num_points
             # points, we need to make sure we only use the valid results
@@ -178,4 +178,4 @@ def compute_update_restricted_to_domain(self, max_relative_change, current_point, update_vector):
             max_relative_change,
             current_point[i, ...],
             update_vector[i, ...])
-            for i in xrange(self.num_repeats)])
+            for i in range(self.num_repeats)])
diff --git a/moe/tests/bandit/data_containers_test.py b/moe/tests/bandit/data_containers_test.py
index 06b52de3..03df8133 100644
--- a/moe/tests/bandit/data_containers_test.py
+++ b/moe/tests/bandit/data_containers_test.py
@@ -28,7 +28,7 @@ class TestDataContainers(BanditTestCase):
     def test_sample_arm_str(self):
         """Test SampleArm's __str__ overload operator."""
         for historical_info in self.historical_infos_to_test:
-            for arm in historical_info.arms_sampled.itervalues():
+            for arm in historical_info.arms_sampled.values():
                 assert str(arm) == pprint.pformat(arm.json_payload())

     def test_sample_arm_add(self):
diff --git a/moe/tests/optimal_learning/python/cpp_wrappers/expected_improvement_test.py b/moe/tests/optimal_learning/python/cpp_wrappers/expected_improvement_test.py
index acefc1d2..1c5224f4 100644
--- a/moe/tests/optimal_learning/python/cpp_wrappers/expected_improvement_test.py
+++ b/moe/tests/optimal_learning/python/cpp_wrappers/expected_improvement_test.py
@@ -77,7 +77,7 @@ def test_python_and_cpp_return_same_1d_analytic_ei_and_gradient(self):
         cpp_gp = moe.optimal_learning.python.cpp_wrappers.gaussian_process.GaussianProcess(cpp_cov, historical_data)
         cpp_ei_eval = moe.optimal_learning.python.cpp_wrappers.expected_improvement.ExpectedImprovement(cpp_gp, points_to_sample)

-        for _ in xrange(num_tests_per_case):
+        for _ in range(num_tests_per_case):
             points_to_sample = domain.generate_random_point_in_domain()
             cpp_ei_eval.current_point = points_to_sample
             python_ei_eval.current_point = points_to_sample
diff --git a/moe/tests/optimal_learning/python/cpp_wrappers/gaussian_process_test.py b/moe/tests/optimal_learning/python/cpp_wrappers/gaussian_process_test.py
index 87013804..837693d0 100644
--- a/moe/tests/optimal_learning/python/cpp_wrappers/gaussian_process_test.py
+++ b/moe/tests/optimal_learning/python/cpp_wrappers/gaussian_process_test.py
@@ -40,12 +40,12 @@ def test_sample_point_from_gp(self):
         gaussian_process = GaussianProcess(covariance, historical_data)
         out_values = numpy.zeros(3)
-        for i in xrange(3):
+        for i in range(3):
             out_values[i] = gaussian_process.sample_point_from_gp(point_two.point, 0.001)

         gaussian_process._gaussian_process.reset_to_most_recent_seed()
         out_values_test = numpy.ones(3)
-        for i in xrange(3):
+        for i in range(3):
             out_values_test[i] = gaussian_process.sample_point_from_gp(point_two.point, 0.001)

         # Exact match b/c we should've run over the exact same computations
@@ -100,7 +100,7 @@ def test_python_and_cpp_return_same_mu_and_gradient(self):
         cpp_gp = GaussianProcess(cpp_cov, historical_data)

         for num_to_sample in self.num_to_sample_list:
-            for _ in xrange(num_tests_per_case):
+            for _ in range(num_tests_per_case):
                 points_to_sample = domain.generate_uniform_random_points_in_domain(num_to_sample)

                 cpp_mu = cpp_gp.compute_mean_of_points(points_to_sample)
@@ -125,7 +125,7 @@ def test_python_and_cpp_return_same_variance_and_gradient(self):
         cpp_gp = GaussianProcess(cpp_cov, historical_data)

         for num_to_sample in self.num_to_sample_list:
-            for _ in xrange(num_tests_per_case):
+            for _ in range(num_tests_per_case):
                 points_to_sample = domain.generate_uniform_random_points_in_domain(num_to_sample)

                 cpp_var = cpp_gp.compute_variance_of_points(points_to_sample)
@@ -151,7 +151,7 @@ def test_python_and_cpp_return_same_cholesky_variance_and_gradient(self):
         cpp_gp = GaussianProcess(cpp_cov, historical_data)

         for num_to_sample in self.num_to_sample_list:
-            for _ in xrange(num_tests_per_case):
+            for _ in range(num_tests_per_case):
                 points_to_sample = domain.generate_uniform_random_points_in_domain(num_to_sample)

                 cpp_var = cpp_gp.compute_cholesky_variance_of_points(points_to_sample)
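test_sample_point_from_gp above leans on resetting the C++ RNG to reproduce draws bit-for-bit; the same idea expressed with numpy's legacy global generator (illustrative only, not the API the test uses):

    import numpy

    state = numpy.random.get_state()
    draws_a = numpy.random.normal(size=3)
    numpy.random.set_state(state)  # analogous to reset_to_most_recent_seed()
    draws_b = numpy.random.normal(size=3)
    # Exact match because the identical computations were re-run.
    assert (draws_a == draws_b).all()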
diff --git a/moe/tests/optimal_learning/python/gaussian_process_test_utils.py b/moe/tests/optimal_learning/python/gaussian_process_test_utils.py
index 0e5af22b..e4456162 100644
--- a/moe/tests/optimal_learning/python/gaussian_process_test_utils.py
+++ b/moe/tests/optimal_learning/python/gaussian_process_test_utils.py
@@ -29,7 +29,7 @@ def fill_random_covariance_hyperparameters(hyperparameter_interval, num_hyperparameters, covariance_type):
     """
     hyper = [numpy.random.uniform(hyperparameter_interval.min, hyperparameter_interval.max)
-             for _ in xrange(num_hyperparameters)]
+             for _ in range(num_hyperparameters)]
     return covariance_type(hyper)
diff --git a/moe/tests/optimal_learning/python/geometry_utils_test.py b/moe/tests/optimal_learning/python/geometry_utils_test.py
index 4e5eb3bf..e74cecc8 100644
--- a/moe/tests/optimal_learning/python/geometry_utils_test.py
+++ b/moe/tests/optimal_learning/python/geometry_utils_test.py
@@ -60,7 +60,7 @@ def test_latin_hypercube_within_domain(self):
     def test_make_rand_point_within_domain(self):
         """Test that domain.generate_random_point_in_domain returns a point in the domain."""
         for domain in self.domains_to_test:
-            for _ in xrange(10):
+            for _ in range(10):
                 point = domain.generate_random_point_in_domain()
                 assert domain.check_point_inside(point) is True
@@ -77,7 +77,7 @@ def test_latin_hypercube_equally_spaced(self):
             domain_bounds = domain._domain_bounds
             points = generate_latin_hypercube_points(num_points, domain_bounds)

-            for dim in xrange(domain.dim):
+            for dim in range(domain.dim):
                 # The size of each slice
                 sub_domain_width = domain_bounds[dim].length / float(num_points)
                 # Sort in dim dimension
diff --git a/moe/tests/optimal_learning/python/optimal_learning_test_case.py b/moe/tests/optimal_learning/python/optimal_learning_test_case.py
index 6182a8c5..da3e89b6 100644
--- a/moe/tests/optimal_learning/python/optimal_learning_test_case.py
+++ b/moe/tests/optimal_learning/python/optimal_learning_test_case.py
@@ -84,8 +84,8 @@ def assert_points_distinct(point_list, tol):
         """
         __tracebackhide__ = True
-        for i in xrange(point_list.shape[0]):
-            for j in xrange(i + 1, point_list.shape[0]):
+        for i in range(point_list.shape[0]):
+            for j in range(i + 1, point_list.shape[0]):
                 temp = point_list[i, ...] - point_list[j, ...]
                 dist = numpy.linalg.norm(temp)
                 OptimalLearningTestCase.assert_scalar_within_relative(dist, 0.0, tol)
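assert_points_distinct above ports its nested index loops directly; if the loops were ever restyled, itertools.combinations enumerates the same i < j pairs without index arithmetic (an optional cleanup, not part of this diff):

    import itertools
    # The nested loops visit each unordered pair of rows exactly once:
    assert list(itertools.combinations(range(3), 2)) == [(0, 1), (0, 2), (1, 2)]
    # e.g. for a, b in itertools.combinations(point_list, 2): check the pair (a, b)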
diff --git a/moe/tests/optimal_learning/python/python_version/covariance_test.py b/moe/tests/optimal_learning/python/python_version/covariance_test.py
index fcdbed1c..74962964 100644
--- a/moe/tests/optimal_learning/python/python_version/covariance_test.py
+++ b/moe/tests/optimal_learning/python/python_version/covariance_test.py
@@ -180,7 +180,7 @@ def test_hyperparameter_gradient_pings(self):
         points1 = domain.generate_uniform_random_points_in_domain(num_tests)
         points2 = domain.generate_uniform_random_points_in_domain(num_tests)

-        for i in xrange(num_tests):
+        for i in range(num_tests):
             point_one = points1[i, ...]
             point_two = points2[i, ...]
@@ -191,7 +191,7 @@ def test_hyperparameter_gradient_pings(self):
             )
             analytic_grad = covariance.hyperparameter_grad_covariance(point_one, point_two)

-            for k in xrange(covariance.num_hyperparameters):
+            for k in range(covariance.num_hyperparameters):
                 hyperparameters_old = covariance.hyperparameters

                 # hyperparameter + h
diff --git a/moe/tests/optimal_learning/python/python_version/expected_improvement_test.py b/moe/tests/optimal_learning/python/python_version/expected_improvement_test.py
index 514f77ca..750c005e 100644
--- a/moe/tests/optimal_learning/python/python_version/expected_improvement_test.py
+++ b/moe/tests/optimal_learning/python/python_version/expected_improvement_test.py
@@ -446,7 +446,7 @@ def test_qd_and_1d_return_same_analytic_ei(self):
         points_to_sample = domain.generate_random_point_in_domain()
         python_ei_eval = moe.optimal_learning.python.python_version.expected_improvement.ExpectedImprovement(python_gp, points_to_sample)

-        for _ in xrange(num_tests_per_case):
+        for _ in range(num_tests_per_case):
             points_to_sample = domain.generate_random_point_in_domain()

             python_ei_eval.current_point = points_to_sample
diff --git a/moe/tests/optimal_learning/python/python_version/log_likelihood_test.py b/moe/tests/optimal_learning/python/python_version/log_likelihood_test.py
index e1499ccf..536a7c04 100644
--- a/moe/tests/optimal_learning/python/python_version/log_likelihood_test.py
+++ b/moe/tests/optimal_learning/python/python_version/log_likelihood_test.py
@@ -61,7 +61,7 @@ def test_grad_log_likelihood_pings(self):
         lml = GaussianProcessLogMarginalLikelihood(python_cov, historical_data)
         analytic_grad = lml.compute_grad_log_likelihood()

-        for k in xrange(lml.num_hyperparameters):
+        for k in range(lml.num_hyperparameters):
             hyperparameters_old = lml.hyperparameters

             # hyperparameter + h
diff --git a/moe/tests/views/rest/bandit_test.py b/moe/tests/views/rest/bandit_test.py
index c2c12612..d15eab80 100644
--- a/moe/tests/views/rest/bandit_test.py
+++ b/moe/tests/views/rest/bandit_test.py
@@ -48,16 +48,16 @@ def _test_interface_returns_as_expected(self):
         for subtype in BANDIT_ENDPOINTS_TO_SUBTYPES[self._endpoint]:
             for historical_info in self._historical_infos:
                 json_payload = self._build_json_payload(subtype, historical_info)
-                arm_names = set([arm_name for arm_name in historical_info.arms_sampled.iterkeys()])
+                arm_names = set([arm_name for arm_name in historical_info.arms_sampled.keys()])
                 resp = self.testapp.post(self._moe_route.endpoint, json_payload)
                 resp_schema = BanditResponse()
                 resp_dict = resp_schema.deserialize(json.loads(resp.body))
-                resp_arm_names = set([arm_name for arm_name in resp_dict['arm_allocations'].iterkeys()])
+                resp_arm_names = set([arm_name for arm_name in resp_dict['arm_allocations'].keys()])
                 assert arm_names == resp_arm_names
                 # The allocations should be in range [0, 1]
                 # The sum of all allocations should be 1.0.
                 total_allocation = 0
-                for allocation in resp_dict['arm_allocations'].itervalues():
+                for allocation in resp_dict['arm_allocations'].values():
                     assert allocation >= 0
                     assert allocation <= 1
                     total_allocation += allocation
diff --git a/moe/tests/views/rest/gp_hyper_opt_test.py b/moe/tests/views/rest/gp_hyper_opt_test.py
index 5bde3a2d..5250da0e 100644
--- a/moe/tests/views/rest/gp_hyper_opt_test.py
+++ b/moe/tests/views/rest/gp_hyper_opt_test.py
@@ -100,7 +100,7 @@ def test_optimizer_params_passed_through(self):
         assert optimizer_parameters._get_member_dict() == test_param_dict

         # Test arbitrary parameters get passed through
-        for i, key in enumerate(test_param_dict.iterkeys()):
+        for i, key in enumerate(test_param_dict.keys()):
             test_param_dict[key] /= 2

         test_num_multistarts = test_param_dict.pop('num_multistarts')
diff --git a/moe/tests/views/rest/gp_next_points_test.py b/moe/tests/views/rest/gp_next_points_test.py
index 3de569d2..f9092960 100644
--- a/moe/tests/views/rest/gp_next_points_test.py
+++ b/moe/tests/views/rest/gp_next_points_test.py
@@ -87,7 +87,7 @@ def test_optimizer_params_passed_through(self):
         assert optimizer_parameters._get_member_dict() == test_param_dict

         # Test arbitrary parameters get passed through
-        for i, key in enumerate(test_param_dict.iterkeys()):
+        for i, key in enumerate(test_param_dict.keys()):
             test_param_dict[key] /= 2

         test_num_multistarts = test_param_dict.pop('num_multistarts')
diff --git a/moe/views/optimizable_gp_pretty_view.py b/moe/views/optimizable_gp_pretty_view.py
index 269d7f34..46c8c6cb 100644
--- a/moe/views/optimizable_gp_pretty_view.py
+++ b/moe/views/optimizable_gp_pretty_view.py
@@ -75,7 +75,7 @@ def get_params_from_request(self):
         # Override the defaults with information that may be in the optimizer parameters
         optimizer_parameters_dict = default_optimizer_parameters.optimizer_parameters._asdict()
         if params['optimizer_info']['optimizer_parameters']:
-            for param, val in params['optimizer_info']['optimizer_parameters'].iteritems():
+            for param, val in params['optimizer_info']['optimizer_parameters'].items():
                 optimizer_parameters_dict[param] = val

         # Find the schema class that corresponds to the ``optimizer_type`` of the request
diff --git a/moe/views/rest/gp_next_points_constant_liar.py b/moe/views/rest/gp_next_points_constant_liar.py
index c6c64dd9..683a78cc 100644
--- a/moe/views/rest/gp_next_points_constant_liar.py
+++ b/moe/views/rest/gp_next_points_constant_liar.py
@@ -60,7 +60,7 @@ def get_lie_value(self, params):
         elif params.get('lie_method') == CONSTANT_LIAR_MEAN:
             return numpy.mean(points_sampled_values)
         else:
-            raise(NotImplementedError, '{0} is not implemented'.format(params.get('lie_method')))
+            raise NotImplementedError('{0} is not implemented'.format(params.get('lie_method')))

     @view_config(route_name=_route_name, renderer='json', request_method='POST')
     def gp_next_points_constant_liar_view(self):
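On the constant-liar change above: Python 2's ``raise ExcClass, message`` statement form is gone in Python 3, and raising a ``(ExcClass, message)`` tuple (what the parenthesized original effectively did) is gone too; the only remaining spelling constructs the exception instance. The converted line is written here so the lie_method name survives in the error. A minimal sketch with a placeholder value:

    lie_method = 'hypothetical_method'  # placeholder for params.get('lie_method')
    try:
        raise NotImplementedError('{0} is not implemented'.format(lie_method))
    except NotImplementedError as err:
        assert 'hypothetical_method' in str(err)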
diff --git a/moe/views/schemas/bandit_pretty_view.py b/moe/views/schemas/bandit_pretty_view.py
index 65ecd999..e56d1974 100644
--- a/moe/views/schemas/bandit_pretty_view.py
+++ b/moe/views/schemas/bandit_pretty_view.py
@@ -31,7 +31,7 @@ def validator(self, node, cstruct):
         """
         total_allocation = 0.0
-        for arm_name, allocation in cstruct.iteritems():
+        for arm_name, allocation in cstruct.items():
             total_allocation += allocation
             if not 0.0 <= allocation <= 1.0:
                 raise colander.Invalid(node, msg='Allocation = {:f} must be in range [0,1].'.format(allocation))
@@ -59,12 +59,12 @@ def validator(self, node, cstruct):
         :type cstruct: dictionary of (arm name, SingleArm) key-value pairs

         """
-        for arm_name, sample_arm in cstruct.iteritems():
+        for arm_name, sample_arm in cstruct.items():
             if 'loss' not in sample_arm:
                 sample_arm['loss'] = 0
             if 'variance' not in sample_arm:
                 sample_arm['variance'] = None
-            if not (set(sample_arm.keys()) == set(map(lambda s: s.lstrip('_'), SampleArm.__slots__))):
+            if not (set(sample_arm.keys()) == set([s.lstrip('_') for s in SampleArm.__slots__])):
                 raise colander.Invalid(node, msg='Value = {:s} must be a valid SampleArm.'.format(sample_arm))
             SampleArm(sample_arm['win'], sample_arm['loss'], sample_arm['total'], sample_arm['variance'])
diff --git a/moe/views/utils.py b/moe/views/utils.py
index 4b264a88..94f8db47 100644
--- a/moe/views/utils.py
+++ b/moe/views/utils.py
@@ -160,7 +160,7 @@ def _make_bandit_historical_info_from_params(params, arm_type=SampleArm):
     """
     arms_sampled = {}

    # Load up the info
-    for arm_name, sampled_arm in params.get("historical_info").get("arms_sampled").iteritems():
+    for arm_name, sampled_arm in params.get("historical_info").get("arms_sampled").items():
         arms_sampled[arm_name] = arm_type(win=sampled_arm.get("win"), loss=sampled_arm.get("loss", 0), total=sampled_arm.get("total"), variance=sampled_arm.get("variance", None))

     bandit_historical_info = BanditHistoricalData(sample_arms=arms_sampled)
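For reference, _make_bandit_historical_info_from_params above implies a request shape like the following (hypothetical arm names and counts; per the ``.get()`` defaults in the loop body, ``loss`` falls back to 0 and ``variance`` to None when omitted):

    params = {
        "historical_info": {
            "arms_sampled": {
                "arm_a": {"win": 20, "loss": 5, "total": 25, "variance": None},
                "arm_b": {"win": 10, "total": 30},  # loss -> 0, variance -> None
            },
        },
    }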