|
13 | 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
14 | 14 | # See the License for the specific language governing permissions and
|
15 | 15 | # limitations under the License.
|
"""Use this script to get a quick summary of your system config.
It should be able to run without any of LeRobot's dependencies or LeRobot itself installed.
"""

import platform

# Probe each optional dependency exactly once at import time. Each HAS_* flag
# records whether the corresponding package could be imported, letting the rest
# of the script degrade gracefully to "N/A" instead of crashing when a package
# is absent.
try:
    import huggingface_hub
    HAS_HF_HUB = True
except ImportError:
    HAS_HF_HUB = False

try:
    import datasets
    HAS_HF_DATASETS = True
except ImportError:
    HAS_HF_DATASETS = False

try:
    import numpy as np
    HAS_NP = True
except ImportError:
    HAS_NP = False

try:
    import torch
    HAS_TORCH = True
except ImportError:
    HAS_TORCH = False

try:
    import lerobot
    HAS_LEROBOT = True
except ImportError:
    HAS_LEROBOT = False
19 | 53 |
|
20 |
| -# import dataset |
21 |
| -import numpy as np |
22 |
| -import torch |
23 | 54 |
|
24 |
| -from lerobot import __version__ as version |
# Placeholder shown for any package whose import failed above.
_NA = "N/A"

# Resolve a printable version string for each optional package.
lerobot_version = lerobot.__version__ if HAS_LEROBOT else _NA
hf_hub_version = huggingface_hub.__version__ if HAS_HF_HUB else _NA
hf_datasets_version = datasets.__version__ if HAS_HF_DATASETS else _NA
np_version = np.__version__ if HAS_NP else _NA

if HAS_TORCH:
    torch_version = torch.__version__
    torch_cuda_available = torch.cuda.is_available()
    # NOTE(review): uses the private torch._C API to get the compiled CUDA
    # version; only queried when torch reports a CUDA build at all.
    cuda_version = torch._C._cuda_getCompiledVersion() if torch.version.cuda is not None else _NA
else:
    torch_version = _NA
    torch_cuda_available = _NA
    cuda_version = _NA
30 | 63 |
|
31 | 64 |
|
# TODO(aliberts): refactor into an actual command `lerobot env`
def display_sys_info() -> dict:
    """Collect basic system info to help for tracking issues & bugs.

    Prints a copy-pastable summary for GitHub issues and returns the
    collected values as a dict so callers can reuse them.
    """
    # Ordered (label, value) pairs; dict() preserves this insertion order.
    entries = [
        ("`lerobot` version", lerobot_version),
        ("Platform", platform.platform()),
        ("Python version", platform.python_version()),
        ("Huggingface_hub version", hf_hub_version),
        ("Dataset version", hf_datasets_version),
        ("Numpy version", np_version),
        ("PyTorch version (GPU?)", f"{torch_version} ({torch_cuda_available})"),
        ("Cuda version", cuda_version),
        # The one entry the reporter must fill in by hand.
        ("Using GPU in script?", "<fill in>"),
        # ("Using distributed or parallel set-up in script?", "<fill in>"),
    ]
    info = dict(entries)
    print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the last point.\n")
    print(format_dict(info))
    return info
|
51 | 83 |
|
|
0 commit comments