
Use ufmt for code formatting and import sorting #2262


Merged: 7 commits, Feb 1, 2023
1 change: 0 additions & 1 deletion .github/workflows/run_test_suite.yml
@@ -23,7 +23,6 @@ jobs:
         run: |
           pip install flake8==4.0.1 flake8-print==4.0.0 pre-commit
           pre-commit install
-          pre-commit run seed-isort-config || true
       - name: Run linting
         run: |
           flake8
2 changes: 0 additions & 2 deletions .isort.cfg

This file was deleted.

17 changes: 6 additions & 11 deletions .pre-commit-config.yaml
@@ -17,19 +17,14 @@ repos:
       - id: flake8
         args: [--config=setup.cfg]
         exclude: ^(examples/*)|(docs/*)
-  - repo: https://github.com/ambv/black
-    rev: 22.3.0
+  - repo: https://github.com/omnilib/ufmt
+    rev: v2.0.0
     hooks:
-      - id: black
+      - id: ufmt
+        additional_dependencies:
+          - black == 22.3.0
+          - usort == 1.0.3
         exclude: ^(build/*)|(docs/*)|(examples/*)
-        args: [-l 120, --target-version=py37]
-  - repo: https://github.com/pre-commit/mirrors-isort
-    rev: v5.10.1
-    hooks:
-      - id: isort
-        language_version: python3
-        exclude: ^(build/*)|(docs/*)|(examples/*)
-        args: [-w120, -m3, --tc, --project=gpytorch]
   - repo: https://github.com/jumanjihouse/pre-commit-hooks
     rev: 2.1.6
     hooks:
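Note: the new hook can also be exercised outside of pre-commit. Below is a minimal sketch (not part of this PR), assuming the pinned tools (ufmt 2.0.0, black 22.3.0, usort 1.0.3) are installed and the repository root is the working directory; `pre-commit run ufmt --all-files` goes through the hook itself and has the same effect as the second call.

```python
# Minimal sketch (not part of this PR): invoke the same formatter the new
# pre-commit hook uses. ufmt runs usort (import sorting) first, then black
# (code formatting), on every file under the given paths.
import subprocess

# Report what would change without modifying any files.
subprocess.run(["ufmt", "diff", "gpytorch/", "test/"], check=False)

# Rewrite the files in place.
subprocess.run(["ufmt", "format", "gpytorch/", "test/"], check=True)
```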
2 changes: 1 addition & 1 deletion README.md
@@ -82,7 +82,7 @@ to the general style guidelines enforced by the repo. To do this, navigate to yo
 pip install pre-commit
 pre-commit install
 ```
-From then on, this will automatically run flake8, isort, black and other tools over the files you commit each time you commit to gpytorch or a fork of it.
+From then on, this will automatically run flake8, µfmt (black + µsort), and other tools over the files you commit each time you commit to gpytorch or a fork of it.
 
 ## The Team
 
3 changes: 2 additions & 1 deletion examples/06_PyTorch_NN_Integration_DKL/densenet.py
@@ -3,10 +3,11 @@
 # This implementation is based on the DenseNet-BC implementation in torchvision
 # https://github.com/pytorch/vision/blob/master/torchvision/models/densenet.py
 
+from collections import OrderedDict
+
 import torch
 import torch.nn as nn
 import torch.nn.functional as F
-from collections import OrderedDict
 
 
 class _DenseLayer(nn.Sequential):
11 changes: 6 additions & 5 deletions examples/LBFGS.py
@@ -1,8 +1,9 @@
-import torch
-import numpy as np
-import matplotlib.pyplot as plt
-from functools import reduce
 from copy import deepcopy
+from functools import reduce
+
+import matplotlib.pyplot as plt
+import numpy as np
+import torch
 from torch.optim import Optimizer
 
 
@@ -85,7 +86,7 @@ def polyinterp(points, x_min_bound=None, x_max_bound=None, plot=False):
         # d2 = sqrt(d1^2 - g1*g2)
         # x_min = x2 - (x2 - x1)*((g2 + d2 - d1)/(g2 - g1 + 2*d2))
         d1 = points[0, 2] + points[1, 2] - 3 * ((points[0, 1] - points[1, 1]) / (points[0, 0] - points[1, 0]))
-        d2 = np.sqrt(d1 ** 2 - points[0, 2] * points[1, 2])
+        d2 = np.sqrt(d1**2 - points[0, 2] * points[1, 2])
         if np.isreal(d2):
             x_sol = points[1, 0] - (points[1, 0] - points[0, 0]) * (
                 (points[1, 2] + d2 - d1) / (points[1, 2] - points[0, 2] + 2 * d2)
2 changes: 1 addition & 1 deletion gpytorch/constraints/constraints.py
@@ -6,7 +6,7 @@
 from typing import Optional
 
 import torch
-from torch import Tensor, sigmoid
+from torch import sigmoid, Tensor
 from torch.nn import Module
 
 from ..utils.transforms import _get_inv_param_transform, inv_sigmoid, inv_softplus
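Most of the one-line import changes in the remaining files come from usort's ordering of names inside a single import statement, which is (to a first approximation) case-insensitive, so underscore-prefixed and lowercase names move ahead of CamelCase ones. An illustrative sketch of that ordering rule, not code from this PR:

```python
# Rough model of usort's within-statement name ordering: a case-insensitive sort.
# It reproduces the reordering seen below, e.g. `from torch import Tensor, sigmoid`
# becoming `from torch import sigmoid, Tensor`.
names = ["Tensor", "sigmoid", "_GaussianLikelihoodBase", "base_distributions"]
print(sorted(names, key=str.casefold))
# ['_GaussianLikelihoodBase', 'base_distributions', 'sigmoid', 'Tensor']
```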
4 changes: 1 addition & 3 deletions gpytorch/kernels/keops/rbf_kernel.py
@@ -22,9 +22,7 @@ class RBFKernel(KeOpsKernel):
     has_lengthscale = True
 
     def _nonkeops_covar_func(self, x1, x2, diag=False):
-        return postprocess_rbf(
-            self.covar_dist(x1, x2, square_dist=True, diag=diag)
-        )
+        return postprocess_rbf(self.covar_dist(x1, x2, square_dist=True, diag=diag))
 
     def covar_func(self, x1, x2, diag=False):
         # We only should use KeOps on big kernel matrices
2 changes: 1 addition & 1 deletion gpytorch/kernels/rbf_kernel_grad.py
@@ -3,7 +3,7 @@
 import torch
 from linear_operator.operators import KroneckerProductLinearOperator
 
-from .rbf_kernel import RBFKernel, postprocess_rbf
+from .rbf_kernel import postprocess_rbf, RBFKernel
 
 
 class RBFKernelGrad(RBFKernel):
6 changes: 3 additions & 3 deletions gpytorch/likelihoods/__init__.py
@@ -3,16 +3,16 @@
 from .bernoulli_likelihood import BernoulliLikelihood
 from .beta_likelihood import BetaLikelihood
 from .gaussian_likelihood import (
+    _GaussianLikelihoodBase,
     DirichletClassificationLikelihood,
     FixedNoiseGaussianLikelihood,
     GaussianLikelihood,
     GaussianLikelihoodWithMissingObs,
-    _GaussianLikelihoodBase,
 )
 from .laplace_likelihood import LaplaceLikelihood
-from .likelihood import Likelihood, _OneDimensionalLikelihood
+from .likelihood import _OneDimensionalLikelihood, Likelihood
 from .likelihood_list import LikelihoodList
-from .multitask_gaussian_likelihood import MultitaskGaussianLikelihood, _MultitaskGaussianLikelihoodBase
+from .multitask_gaussian_likelihood import _MultitaskGaussianLikelihoodBase, MultitaskGaussianLikelihood
 from .noise_models import HeteroskedasticNoise
 from .softmax_likelihood import SoftmaxLikelihood
 from .student_t_likelihood import StudentTLikelihood
2 changes: 1 addition & 1 deletion gpytorch/likelihoods/gaussian_likelihood.py
@@ -9,7 +9,7 @@
 from linear_operator.operators import ZeroLinearOperator
 from torch import Tensor
 
-from ..distributions import MultivariateNormal, base_distributions
+from ..distributions import base_distributions, MultivariateNormal
 from ..utils.warnings import GPInputWarning
 from .likelihood import Likelihood
 from .noise_models import FixedGaussianNoise, HomoskedasticNoise, Noise
2 changes: 1 addition & 1 deletion gpytorch/likelihoods/likelihood.py
@@ -8,7 +8,7 @@
 import torch
 
 from .. import settings
-from ..distributions import MultivariateNormal, base_distributions
+from ..distributions import base_distributions, MultivariateNormal
 from ..module import Module
 from ..utils.quadrature import GaussHermiteQuadrature1D
 from ..utils.warnings import GPInputWarning
4 changes: 2 additions & 2 deletions gpytorch/likelihoods/multitask_gaussian_likelihood.py
@@ -15,9 +15,9 @@
 from torch import Tensor
 
 from ..constraints import GreaterThan
-from ..distributions import MultitaskMultivariateNormal, base_distributions
+from ..distributions import base_distributions, MultitaskMultivariateNormal
 from ..lazy import LazyEvaluatedKernelTensor
-from ..likelihoods import Likelihood, _GaussianLikelihoodBase
+from ..likelihoods import _GaussianLikelihoodBase, Likelihood
 from ..module import Module
 from ..priors import Prior
 
2 changes: 1 addition & 1 deletion gpytorch/likelihoods/softmax_likelihood.py
@@ -4,7 +4,7 @@
 
 import torch
 
-from ..distributions import Distribution, MultitaskMultivariateNormal, base_distributions
+from ..distributions import base_distributions, Distribution, MultitaskMultivariateNormal
 from .likelihood import Likelihood
 
 
2 changes: 1 addition & 1 deletion gpytorch/priors/horseshoe_prior.py
@@ -4,7 +4,7 @@
 from numbers import Number
 
 import torch
-from torch.distributions import HalfCauchy, Normal, constraints
+from torch.distributions import constraints, HalfCauchy, Normal
 from torch.nn import Module as TModule
 
 from gpytorch.priors.prior import Prior
2 changes: 1 addition & 1 deletion gpytorch/priors/lkj_prior.py
@@ -2,7 +2,7 @@
 
 import torch
 from linear_operator.utils.cholesky import psd_safe_cholesky
-from torch.distributions import LKJCholesky, constraints
+from torch.distributions import constraints, LKJCholesky
 from torch.nn import Module as TModule
 
 from .. import settings
3 changes: 1 addition & 2 deletions gpytorch/utils/interpolation.py
@@ -6,8 +6,7 @@
 from typing import List
 
 import torch
-from linear_operator.utils.interpolation import left_interp as _left_interp
-from linear_operator.utils.interpolation import left_t_interp as _left_t_interp
+from linear_operator.utils.interpolation import left_interp as _left_interp, left_t_interp as _left_t_interp
 
 from .grid import convert_legacy_grid
 
2 changes: 1 addition & 1 deletion gpytorch/variational/__init__.py
@@ -14,7 +14,7 @@
 )
 from .lmc_variational_strategy import LMCVariationalStrategy
 from .mean_field_variational_distribution import MeanFieldVariationalDistribution
-from .natural_variational_distribution import NaturalVariationalDistribution, _NaturalVariationalDistribution
+from .natural_variational_distribution import _NaturalVariationalDistribution, NaturalVariationalDistribution
 from .nearest_neighbor_variational_strategy import NNVariationalStrategy
 from .orthogonally_decoupled_variational_strategy import OrthogonallyDecoupledVariationalStrategy
 from .tril_natural_variational_distribution import TrilNaturalVariationalDistribution
6 changes: 6 additions & 0 deletions pyproject.toml
@@ -2,6 +2,12 @@
 requires = ["setuptools", "setuptools-scm", "wheel"]
 build-backend = "setuptools.build_meta"
 
+[tool.black]
+line-length = 120
+
+[tool.usort.known]
+first_party = ["gpytorch"]
+
 [tool.setuptools_scm]
 local_scheme = "node-and-date"
 write_to = "./gpytorch/version.py"
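These two tables replace the settings that previously lived in `.isort.cfg` and in the black hook arguments: black keeps the 120-character line length, and usort now treats `gpytorch` as first-party. That is why the test files later in this diff move `import gpytorch` out of the third-party block into its own section. An illustrative sketch of the resulting layout (assumes the packages are installed; not code from this PR):

```python
# Import layout usort produces with first_party = ["gpytorch"]:
# standard library, third-party, and first-party blocks, separated by blank lines.
import unittest  # standard library

import torch  # third-party
from torch import optim

import gpytorch  # first-party, its own section because of [tool.usort.known]
from gpytorch.likelihoods import GaussianLikelihood
```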
3 changes: 3 additions & 0 deletions requirements.txt
@@ -1,3 +1,6 @@
 linear_operator>=0.2.0
 scikit-learn
 torch>=1.11
+black==22.3.0
+ufmt==2.0.0
+usort==1.0.3
2 changes: 1 addition & 1 deletion setup.py
@@ -72,7 +72,7 @@ def find_version(*file_paths):
     python_requires=">=3.8",
     install_requires=install_requires,
     extras_require={
-        "dev": ["black", "twine", "pre-commit"],
+        "dev": ["ufmt", "twine", "pre-commit"],
         "examples": ["ipython", "jupyter", "matplotlib", "scipy", "torchvision", "tqdm"],
         "pyro": ["pyro-ppl>=1.8"],
         "keops": ["pykeops>=1.1.1"],
5 changes: 3 additions & 2 deletions test/examples/test_batch_decoupled_ppgpr_regression.py
@@ -4,13 +4,14 @@
 import unittest
 from unittest.mock import MagicMock, patch
 
-import gpytorch
 import linear_operator
 import torch
+from torch import optim
+
+import gpytorch
 from gpytorch.likelihoods import GaussianLikelihood
 from gpytorch.models import ApproximateGP
 from gpytorch.test.base_test_case import BaseTestCase
-from torch import optim
 
 
 def train_data():
5 changes: 3 additions & 2 deletions test/examples/test_batch_gp_regression.py
@@ -5,13 +5,14 @@
 import random
 import unittest
 
-import gpytorch
 import torch
+from torch import optim
+
+import gpytorch
 from gpytorch.distributions import MultivariateNormal
 from gpytorch.kernels import RBFKernel, ScaleKernel
 from gpytorch.likelihoods import GaussianLikelihood
 from gpytorch.means import ConstantMean
-from torch import optim
 
 
 # Batch training test: Let's learn hyperparameters on a sine dataset, but test on a sine dataset and a cosine dataset
13 changes: 7 additions & 6 deletions test/examples/test_batch_multitask_gp_regression.py
@@ -1,17 +1,18 @@
 #!/usr/bin/env python3
 
+import math
 import os
 import random
-import math
-import torch
 import unittest
 
-import gpytorch
+import torch
 from torch import optim
-from gpytorch.kernels import RBFKernel, MultitaskKernel
-from gpytorch.means import ConstantMean, MultitaskMean
-from gpytorch.likelihoods import MultitaskGaussianLikelihood
+
+import gpytorch
 from gpytorch.distributions import MultitaskMultivariateNormal
+from gpytorch.kernels import MultitaskKernel, RBFKernel
+from gpytorch.likelihoods import MultitaskGaussianLikelihood
+from gpytorch.means import ConstantMean, MultitaskMean
 
 
 # Batch training test: Let's learn hyperparameters on a sine dataset, but test on a sine dataset and a cosine dataset
9 changes: 4 additions & 5 deletions test/examples/test_batch_svgp_gp_regression.py
@@ -5,13 +5,14 @@
 import unittest
 from math import pi
 
-import gpytorch
 import torch
+from torch import optim
+
+import gpytorch
 from gpytorch.likelihoods import GaussianLikelihood
 from gpytorch.models import ApproximateGP
 from gpytorch.test.utils import least_used_cuda_device
 from gpytorch.variational import CholeskyVariationalDistribution, VariationalStrategy
-from torch import optim
 
 
 def train_data(cuda=False):
@@ -34,9 +35,7 @@ def __init__(self, inducing_points):
             inducing_points.size(-2), batch_shape=torch.Size([2])
         )
         variational_strategy = VariationalStrategy(
-            self, inducing_points, variational_distribution,
-            learn_inducing_locations=True,
-            jitter_val=1e-3
+            self, inducing_points, variational_distribution, learn_inducing_locations=True, jitter_val=1e-3
         )
         super(SVGPRegressionModel, self).__init__(variational_strategy)
         self.mean_module = gpytorch.means.ConstantMean()
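The second hunk above is black (via ufmt) rather than usort at work: with `line-length = 120`, the argument list of the `VariationalStrategy(...)` call fits on a single continuation line. A small sketch of that behaviour (not from the PR; assumes black 22.3.0 is importable):

```python
# Reformat the old call with black at line length 120. The arguments are joined
# onto one indented line because they fit within 120 columns.
import black

old_src = (
    "variational_strategy = VariationalStrategy(\n"
    "    self, inducing_points, variational_distribution,\n"
    "    learn_inducing_locations=True,\n"
    "    jitter_val=1e-3\n"
    ")\n"
)
print(black.format_str(old_src, mode=black.Mode(line_length=120)))
```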
5 changes: 3 additions & 2 deletions test/examples/test_decoupled_svgp_regression.py
@@ -4,13 +4,14 @@
 import unittest
 from unittest.mock import MagicMock, patch
 
-import gpytorch
 import linear_operator
 import torch
+from torch import optim
+
+import gpytorch
 from gpytorch.likelihoods import GaussianLikelihood
 from gpytorch.models import ApproximateGP
 from gpytorch.test.base_test_case import BaseTestCase
-from torch import optim
 
 
 def train_data():
13 changes: 7 additions & 6 deletions test/examples/test_dspp_regression.py
@@ -1,13 +1,14 @@
-import gpytorch
-import torch
 import unittest
+
+import torch
+
+import gpytorch
+from gpytorch.kernels import MaternKernel, ScaleKernel
 from gpytorch.likelihoods import GaussianLikelihood
 from gpytorch.means import ConstantMean, LinearMean
-from gpytorch.kernels import ScaleKernel, MaternKernel
-from gpytorch.variational import VariationalStrategy
-from gpytorch.variational import MeanFieldVariationalDistribution
-from gpytorch.models.deep_gps.dspp import DSPPLayer, DSPP
+from gpytorch.models.deep_gps.dspp import DSPP, DSPPLayer
 from gpytorch.test.base_test_case import BaseTestCase
+from gpytorch.variational import MeanFieldVariationalDistribution, VariationalStrategy
 
 
 train_n = 20
5 changes: 3 additions & 2 deletions test/examples/test_fixed_noise_fanatasy_updates.py
@@ -3,8 +3,10 @@
 import unittest
 from math import exp, pi
 
-import gpytorch
 import torch
+from torch import optim
+
+import gpytorch
 from gpytorch.distributions import MultivariateNormal
 from gpytorch.kernels import RBFKernel, ScaleKernel
 from gpytorch.likelihoods import FixedNoiseGaussianLikelihood
@@ -13,7 +15,6 @@
 from gpytorch.priors import SmoothedBoxPrior
 from gpytorch.test.base_test_case import BaseTestCase
 from gpytorch.test.utils import least_used_cuda_device
-from torch import optim
 
 
 class ExactGPModel(gpytorch.models.ExactGP):