diff --git a/projects/PCSegSAM2/Dockerfile b/projects/PCSegSAM2/Dockerfile
new file mode 100644
index 00000000..a24d438b
--- /dev/null
+++ b/projects/PCSegSAM2/Dockerfile
@@ -0,0 +1,95 @@
+ARG PYTORCH="2.3.1" # NOTE(knzo25): use 2.7.0 for blackwell
+ARG CUDA="12.1" # NOTE(knzo25): use 12.8 for blackwell
+ARG CUDNN="8" # NOTE(knzo25): use 9 for blackwell
+FROM pytorch/pytorch:${PYTORCH}-cuda${CUDA}-cudnn${CUDNN}-devel
+
+ARG MMCV="2.1.0"
+ARG MMENGINE="0.10.3"
+ARG MMDET="3.2.0"
+ARG MMDEPLOY="1.3.1"
+ARG MMDET3D="1.4.0"
+ARG MMPRETRAIN="1.2.0"
+ARG MMSEGMENTATION="1.2.2"
+
+ENV CUDA_HOME="/usr/local/cuda" \
+ FORCE_CUDA="1" \
+ TORCH_CUDA_ARCH_LIST="6.0 6.1 7.0 7.5 8.0 8.6 8.7 8.9+PTX" \
+ TORCH_NVCC_FLAGS="-Xfatbin -compress-all"
+
+# Install apt dependencies for base library
+RUN apt update && DEBIAN_FRONTEND=noninteractive apt install -y --no-install-recommends \
+ curl \
+ ffmpeg \
+ git \
+ ninja-build \
+ libglib2.0-0 \
+ libsm6 \
+ libxext6 \
+ libxrender-dev \
+ && apt-get clean \
+ && rm -rf /var/lib/apt/lists/*
+
+# Install pip dependencies for base library
+RUN python3 -m pip --no-cache-dir install \
+ aenum \
+ gitpython \
+ nptyping \
+ numpy==1.23.5 \
+ nvidia-pyindex \
+ openmim \
+ nltk==3.8.1
+
+# Install mim components
+RUN mim install \
+ mmcv==${MMCV} \
+ mmdeploy==${MMDEPLOY} \
+ mmdet==${MMDET} \
+ mmdet3d==${MMDET3D} \
+ mmengine==${MMENGINE} \
+ mmpretrain[multimodal]==${MMPRETRAIN} \
+ mmsegmentation==${MMSEGMENTATION}
+
+# Install rerun
+RUN apt update && DEBIAN_FRONTEND=noninteractive apt install -y --no-install-recommends \
+ libgtk-3-dev \
+ libxkbcommon-x11-0
+RUN python3 -m pip --no-cache-dir install \
+ rerun-sdk==0.17.0
+
+# Install t4-devkit
+RUN python3 -m pip install git+https://github.com/tier4/t4-devkit@v0.0.7
+
+# NOTE(knzo25): this patch is needed to use numpy versions over 1.23.5 (version used in mmdet3d 1.4.0)
+# It can be safely deleted when mmdet3d updates the numpy version
+COPY .patches/mmdet3d.patch /tmp/mmdet3d.patch
+RUN cd $(python -c "import site; print(site.getsitepackages()[0])") \
+ && git apply < /tmp/mmdet3d.patch \
+ && rm -f /tmp/mmdet3d.patch \
+ && cd /
+
+ENV WGPU_BACKEND=gl
+
+WORKDIR /workspace
+
+COPY autoware_ml autoware_ml
+COPY pipelines pipelines
+COPY projects projects
+COPY tools tools
+COPY setup.py setup.py
+COPY README.md README.md
+
+RUN pip install --no-cache-dir -e .
+
+# SAM SPECIFIC ! (SAM2 uses torch >=2.3.1)
+RUN python -m pip install torch==2.3.1 torchvision==0.18.1 torchaudio==2.3.1 --index-url https://download.pytorch.org/whl/cu121
+
+# Install essential Python packages
+RUN python -m pip install --upgrade pip setuptools wheel numpy \
+ opencv-python transformers supervision pycocotools addict yapf timm \
+ typing_extensions accelerate numba==0.61.1rc1
+
+# Install segment_anything package in editable mode
+RUN python -m pip install -e /workspace/projects/PCSegSAM2
+
+# Install grounding dino
+RUN python -m pip install --no-build-isolation -e /workspace/projects/PCSegSAM2/grounding_dino
diff --git a/projects/PCSegSAM2/LICENSE_cctorch b/projects/PCSegSAM2/LICENSE_cctorch
new file mode 100644
index 00000000..23da14a6
--- /dev/null
+++ b/projects/PCSegSAM2/LICENSE_cctorch
@@ -0,0 +1,29 @@
+BSD 3-Clause License
+
+Copyright (c) 2020, the respective contributors, as shown by the AUTHORS file.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/projects/PCSegSAM2/LICENSE_groundingdino b/projects/PCSegSAM2/LICENSE_groundingdino
new file mode 100644
index 00000000..f1460f5e
--- /dev/null
+++ b/projects/PCSegSAM2/LICENSE_groundingdino
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2023 - present, IDEA Research.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/projects/PCSegSAM2/LICENSE_sam2 b/projects/PCSegSAM2/LICENSE_sam2
new file mode 100644
index 00000000..261eeb9e
--- /dev/null
+++ b/projects/PCSegSAM2/LICENSE_sam2
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/projects/PCSegSAM2/README.md b/projects/PCSegSAM2/README.md
new file mode 100644
index 00000000..f3127a63
--- /dev/null
+++ b/projects/PCSegSAM2/README.md
@@ -0,0 +1,225 @@
+# PCSegSAM2
+
+This project leverages SAM2 and Grounding DINO to segment pointclouds via intermediate image segmentation and pointcloud projection.
+The process can be divided into two steps:
+
+ - Image segmentation: Using SAM2 and Grounding DINO, the images of the databases are segmented,
+   attempting to assign a label to every part of the image.
+ - Pointcloud segmentation via projection: Labels are assigned to points by projecting
+   them onto all the cameras in the rig, using several time steps for consistency.
+
+This project is applied to `t4dataset`, but can be easily generalized.
+`info` files are not required, and the output of the pointcloud segmentation is not added to the `infos` for now.
+Instead, the image and pointcloud segmentation results are saved alongside the dataset data.
+
+## Installation
+
+Note: due to SAM2 requirements, the image for this project uses a different version of torch and other dependencies.
+
+Build the image:
+
+```bash
+DOCKER_BUILDKIT=1 docker build -t autoware-ml-sam2 -f projects/PCSegSAM2/Dockerfile . --progress=plain
+```
+
+To execute the container:
+
+```bash
+docker run -it --rm --gpus '"device=0"' --shm-size=64g --name awml -p 6006:6006 -v $PWD/:/workspace -v $PWD/data:/workspace/data autoware-ml-sam2
+```
+
+Before proceeding to the next steps, the checkpoints for SAM2 and Grounding DINO need to be downloaded.
+
+```bash
+cd projects/PCSegSAM2/checkpoints
+bash download_ckpts.sh
+cd ../gdino_checkpoints
+bash download_ckpts.sh
+```
+
+## Generate SAM2 segmented images
+
+*NOTE: Although SAM2 has video segmentation, I did not have enough memory to test it.*
+
+To segment the images of a dataset using SAM2, use:
+
+```bash
+python projects/PCSegSAM2/segment_t4dataset_sam2.py \
+ --root_path ./data/t4dataset \
+ --out_videos ./videos \
+ --dataset_config autoware_ml/configs/detection3d/dataset/t4dataset/base.py \
+ --segmentation_config projects/PCSegSAM2/config/t4dataset_segment.yaml
+```
+
+The `segmentation_config` specifies the `SAM2` settings, including the model variant, checkpoints, and thresholds.
+The class queries aim to cover as many of the elements in a scene as possible, since only non-background
+objects will be assigned valid labels in the pointclouds in the next step.
+
+Segmented images will be generated alongside the original images with the `_seg.png` suffix.
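+
+As a quick sanity check, the masks can be inspected directly. The snippet below is a minimal sketch: it assumes
+that the `_seg.png` files store one class index per pixel, with `background_value` (255 in the shipped config)
+marking background pixels, which should be verified against the actual outputs:
+
+```python
+import cv2
+import numpy as np
+
+# Hypothetical path: any dataset image with its generated "_seg.png" sibling.
+seg = cv2.imread("path/to/CAM_FRONT/00000_seg.png", cv2.IMREAD_UNCHANGED)
+
+# Per-class pixel counts; 255 is assumed to mark background.
+for cls, cnt in zip(*np.unique(seg, return_counts=True)):
+    name = "background" if cls == 255 else f"class {cls}"
+    print(f"{name}: {cnt} pixels")
+```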
+
+## Generate segmented pointclouds using projection
+
+To generate segmented pointclouds, use the following command:
+
+```bash
+python projects/PCSegSAM2/segment_t4dataset_projective.py \
+ --root_path ./data/t4dataset \
+ --database_config autoware_ml/configs/detection3d/dataset/t4dataset/xx1.py \
+  --segmentation_config projects/PCSegSAM2/config/t4dataset_segment.yaml
+```
+
+The classes that are used for pointcloud segmentation are a subset of the ones used in SAM2. This is due to the
+need for relatively high specificity in SAM2 queries to obtain a good sensitivity.
+
+For example, `greenery`, `bush`, and `tree` are all classes that we would like to classify as `vegetation` in a pointcloud.
+However, `SAM2` does not have a high sensitivity towards `vegetation`, which is why, in the previous step, many
+synonyms and related terms are required as queries.
+
+This script will project the pointcloud into the camera rig of the sample associated with the lidar, as well as the
+samples before and after it, controlled via `num_consistent_frames`.
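+
+Conceptually, each projection step is a pinhole projection of the lidar points into a camera. The sketch below
+illustrates the geometry under simplified assumptions (a single combined lidar-to-camera extrinsic
+`T_cam_from_lidar` and an intrinsic matrix `K`; the actual script chains the calibrated lidar-to-ego and
+ego-to-camera transforms across several time steps):
+
+```python
+import numpy as np
+
+
+def project_points_to_image(points_lidar, T_cam_from_lidar, K, image_wh):
+    """Project Nx3 lidar points into one camera; returns pixel coordinates and a validity mask."""
+    num_points = points_lidar.shape[0]
+    points_h = np.hstack([points_lidar, np.ones((num_points, 1))])  # homogeneous coordinates
+    points_cam = points_h @ T_cam_from_lidar.T  # lidar frame -> camera frame
+
+    in_front = points_cam[:, 2] > 0.0  # only points in front of the camera are visible
+    pixels = points_cam[:, 0:3] @ K.T
+    pixels = pixels[:, 0:2] / np.maximum(pixels[:, 2:3], 1e-6)  # perspective division
+
+    width, height = image_wh
+    in_image = (
+        in_front
+        & (pixels[:, 0] >= 0) & (pixels[:, 0] < width)
+        & (pixels[:, 1] >= 0) & (pixels[:, 1] < height)
+    )
+    return pixels, in_image
+```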
+
+The rules for segmentation are as follows (a sketch implementing them is shown after this list):
+  - Points that are not projected into any image are classified as invalid.
+  - Points that are projected only onto pixels classified as background are classified as invalid.
+  - Points that are projected onto pixels with different non-background classes are classified as invalid.
+  - Points that are projected onto at least one non-background class, where all the projected classes coincide, are
+    assigned that class.
+  - The borders between classes in the segmented image are considered background (morphological dilation of the contours).
+
+In this context, invalid means that the label is unknown and should be masked out during pointcloud segmentation training.
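+
+A minimal sketch of how these rules combine is shown below. It assumes an `(N, num_views)` array of per-point
+projected labels, where entries for unprojected points hold the invalid value. The constants mirror the config,
+in which invalid and background share the value 255 (so the first two rules coincide); the border dilation of
+the last rule happens in image space before projection and is not shown:
+
+```python
+import numpy as np
+
+INVALID_VALUE = 255     # label is unknown; masked out during training
+BACKGROUND_VALUE = 255  # background pixels in the segmented images
+
+
+def fuse_projected_labels(projected):
+    """projected: (num_points, num_views) labels; unprojected entries hold INVALID_VALUE."""
+    fused = np.full(projected.shape[0], INVALID_VALUE, dtype=projected.dtype)
+    for i, labels in enumerate(projected):
+        # Rule 1: points never projected into any image stay invalid.
+        valid = labels[labels != INVALID_VALUE]
+        if valid.size == 0:
+            continue
+        # Rule 2: points only ever seen as background stay invalid.
+        non_background = valid[valid != BACKGROUND_VALUE]
+        if non_background.size == 0:
+            continue
+        # Rules 3 & 4: a class is assigned only when all non-background votes agree.
+        if np.all(non_background == non_background[0]):
+            fused[i] = non_background[0]
+    return fused
+```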
+
+Limitations of the projective approach:
+ - Due to sensor calibration errors, vehicle movement, and the lidar scanning pattern, classification will leak between
+   objects. This is somewhat addressed through temporal consistency and morphological operations.
+ - Projective approaches, due to the baseline and the nature of the sensors, will provide wrong labels in some cases,
+   even when the image segmentation is perfect (e.g., a vehicle behind a fence).
+ - Some parts of the pointcloud will not be classified and will be masked out during pointcloud segmentation training.
+   If this phenomenon is consistent, some objects will never receive labels, leading to potential errors at test time.
+
+## (Optional) Refine pointcloud segmentation with object detection cuboids
+
+It is possible to refine the segmentation labels using the cuboids from an object detection ground truth.
+For the `t4dataset`, this can be done using the following command (see the sketch after it for the underlying idea):
+
+```bash
+python projects/PCSegSAM2/segment_t4dataset_projective.py \
+ --root_path ./data/t4dataset \
+ --database_config autoware_ml/configs/detection3d/dataset/t4dataset/xx1.py \
+ --segmentation_config projects/PCSegSAM2/config/t4dataset_segment.yaml
+```
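+
+The refinement idea, roughly: points falling inside an annotated cuboid receive the cuboid's class, while points
+labeled with one of the `reset_classes` that fall outside every cuboid are marked invalid. This is one plausible
+reading of `reset_classes`; the helper below is an illustrative sketch, not the project's implementation:
+
+```python
+import numpy as np
+
+
+def points_in_cuboid(points, center, size, yaw):
+    """Boolean mask of the Nx3 points that fall inside one yaw-rotated cuboid."""
+    c, s = np.cos(-yaw), np.sin(-yaw)
+    rot = np.array([[c, -s, 0.0], [s, c, 0.0], [0.0, 0.0, 1.0]])
+    local = (points - center) @ rot.T  # express points in the box frame
+    return np.all(np.abs(local) <= np.asarray(size) / 2.0, axis=1)
+
+
+def refine_with_cuboids(labels, points, cuboids, reset_classes, invalid_value=255):
+    """cuboids: iterable of (center, size, yaw, class_id) from the detection ground truth."""
+    covered = np.zeros(len(points), dtype=bool)
+    for center, size, yaw, class_id in cuboids:
+        mask = points_in_cuboid(points, center, size, yaw)
+        labels[mask] = class_id
+        covered |= mask
+    # Reset-class points not explained by any cuboid become invalid.
+    reset = np.isin(labels, reset_classes) & ~covered
+    labels[reset] = invalid_value
+    return labels
+```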
+
+## (Optional) Generate BEV videos with the segmentation result
+
+BEV videos of the segmented pointclouds can be generated with the following command:
+
+```bash
+python projects/PCSegSAM2/generate_segmentation_videos.py \
+ --root_path ./data/t4dataset \
+ --out_videos ./videos \
+ --dataset_config autoware_ml/configs/detection3d/dataset/t4dataset/xx1.py \
+ --segmentation_config projects/PCSegSAM2/config/t4dataset_segment.yaml
+```
diff --git a/projects/PCSegSAM2/checkpoints/download_ckpts.sh b/projects/PCSegSAM2/checkpoints/download_ckpts.sh
new file mode 100644
index 00000000..a9d73c9b
--- /dev/null
+++ b/projects/PCSegSAM2/checkpoints/download_ckpts.sh
@@ -0,0 +1,59 @@
+#!/bin/bash
+
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+# Use either wget or curl to download the checkpoints
+if command -v wget &> /dev/null; then
+ CMD="wget"
+elif command -v curl &> /dev/null; then
+ CMD="curl -L -O"
+else
+ echo "Please install wget or curl to download the checkpoints."
+ exit 1
+fi
+
+# Define the URLs for SAM 2 checkpoints
+# SAM2_BASE_URL="https://dl.fbaipublicfiles.com/segment_anything_2/072824"
+# sam2_hiera_t_url="${SAM2_BASE_URL}/sam2_hiera_tiny.pt"
+# sam2_hiera_s_url="${SAM2_BASE_URL}/sam2_hiera_small.pt"
+# sam2_hiera_b_plus_url="${SAM2_BASE_URL}/sam2_hiera_base_plus.pt"
+# sam2_hiera_l_url="${SAM2_BASE_URL}/sam2_hiera_large.pt"
+
+# Download each of the four checkpoints using wget
+# echo "Downloading sam2_hiera_tiny.pt checkpoint..."
+# $CMD $sam2_hiera_t_url || { echo "Failed to download checkpoint from $sam2_hiera_t_url"; exit 1; }
+
+# echo "Downloading sam2_hiera_small.pt checkpoint..."
+# $CMD $sam2_hiera_s_url || { echo "Failed to download checkpoint from $sam2_hiera_s_url"; exit 1; }
+
+# echo "Downloading sam2_hiera_base_plus.pt checkpoint..."
+# $CMD $sam2_hiera_b_plus_url || { echo "Failed to download checkpoint from $sam2_hiera_b_plus_url"; exit 1; }
+
+# echo "Downloading sam2_hiera_large.pt checkpoint..."
+# $CMD $sam2_hiera_l_url || { echo "Failed to download checkpoint from $sam2_hiera_l_url"; exit 1; }
+
+# Define the URLs for SAM 2.1 checkpoints
+SAM2p1_BASE_URL="https://dl.fbaipublicfiles.com/segment_anything_2/092824"
+sam2p1_hiera_t_url="${SAM2p1_BASE_URL}/sam2.1_hiera_tiny.pt"
+sam2p1_hiera_s_url="${SAM2p1_BASE_URL}/sam2.1_hiera_small.pt"
+sam2p1_hiera_b_plus_url="${SAM2p1_BASE_URL}/sam2.1_hiera_base_plus.pt"
+sam2p1_hiera_l_url="${SAM2p1_BASE_URL}/sam2.1_hiera_large.pt"
+
+# SAM 2.1 checkpoints
+echo "Downloading sam2.1_hiera_tiny.pt checkpoint..."
+$CMD $sam2p1_hiera_t_url || { echo "Failed to download checkpoint from $sam2p1_hiera_t_url"; exit 1; }
+
+echo "Downloading sam2.1_hiera_small.pt checkpoint..."
+$CMD $sam2p1_hiera_s_url || { echo "Failed to download checkpoint from $sam2p1_hiera_s_url"; exit 1; }
+
+echo "Downloading sam2.1_hiera_base_plus.pt checkpoint..."
+$CMD $sam2p1_hiera_b_plus_url || { echo "Failed to download checkpoint from $sam2p1_hiera_b_plus_url"; exit 1; }
+
+echo "Downloading sam2.1_hiera_large.pt checkpoint..."
+$CMD $sam2p1_hiera_l_url || { echo "Failed to download checkpoint from $sam2p1_hiera_l_url"; exit 1; }
+
+echo "All checkpoints are downloaded successfully."
diff --git a/projects/PCSegSAM2/config/t4dataset_segment.yaml b/projects/PCSegSAM2/config/t4dataset_segment.yaml
new file mode 100644
index 00000000..4a847a0f
--- /dev/null
+++ b/projects/PCSegSAM2/config/t4dataset_segment.yaml
@@ -0,0 +1,113 @@
+
+sam2:
+ sam2_classes: [
+ "rider",
+ "pedestrian",
+ "animal",
+ "car",
+ "truck",
+ "bus",
+ "trailer",
+ "motorcycle",
+ "bicycle",
+ "road",
+ "sidewalk",
+ "crosswalk",
+ "lane",
+ "vegetation",
+ "tree",
+ "plant",
+ "grass",
+ "bush",
+ "flowerbed",
+ "flower",
+ "greenery",
+ "cone",
+ "obstacle",
+ "debris",
+ "building",
+ "wall",
+ "fence",
+ "bollard",
+ "pole",
+ "lamp post",
+ "traffic light",
+ "traffic sign",
+ ]
+ background_value: 255
+ sam2_checkpoint: "projects/PCSegSAM2/checkpoints/sam2.1_hiera_large.pt"
+ sam2_cfg: "sam2/configs/sam2.1/sam2.1_hiera_l.yaml"
+ grounding_dino_checkpoint: "projects/PCSegSAM2/gdino_checkpoints/groundingdino_swint_ogc.pth"
+ grounding_dino_cfg: "projects/PCSegSAM2/grounding_dino/groundingdino/config/GroundingDINO_SwinT_OGC.py"
+ box_threshold: 0.30
+ text_threshold: 0.25
+ override: False
+ only_key_frames: False
+projective_segmentation:
+ num_consistent_frames: 5
+ invalid_value: 255
+ background_value: 255
+ ground_value: 3
+ min_non_ground_z: -0.2
+ fill_boundaries_with_invalid: True
+ fill_boundaries_width: 10
+ num_workers: 24
+ classes_map: {
+ "rider": 2,
+ "pedestrian": 2,
+ "animal": 5,
+ "car": 0,
+ "truck": 0,
+ "bus": 0,
+ "trailer": 0,
+ "motorcycle": 1,
+ "bicycle": 1,
+ "road": 3,
+ "sidewalk": 3,
+ "crosswalk": 3,
+ "lane": 3,
+ "vegetation": 4,
+ "tree": 4,
+ "plant": 4,
+ "grass": 4,
+ "bush": 4,
+ "flowerbed": 4,
+ "flower": 4,
+ "greenery": 4,
+ "cone": 5,
+ "obstacle": 5,
+ "debris": 5,
+ "building": 5,
+ "wall": 5,
+ "fence": 5,
+ "bollard": 5,
+ "pole": 5,
+ "lamp post": 5,
+ "traffic light": 5,
+ "traffic sign": 5
+ }
+cuboid_segmentation:
+ invalid_value: 255
+ reset_classes: [0, 1, 2]
+ classes_map: {
+ "car": 0,
+ "truck": 0,
+ "bus": 0,
+ "trailer": 0,
+ "motorcycle": 1,
+ "bicycle": 1,
+ "pedestrian": 2,
+
+ }
+visualization:
+ color_map: {
+ 255: [0.85, 0.85, 0.85], # invalid
+ 5: [1.0, 0.0, 1.0], # bg
+ 0: [0.0, 0.0, 1.0], # vehicle
+ 1: [1.0, 0.0, 0.0], # bicycle - motorcycle
+ 2: [1.0, 1.0, 0.0], # pedestrian
+ 3: [0.4, 0.4, 0.4], # road
+ 4: [0.0, 1.0, 0.0], # vegetation
+ 6: [0.0, 1.0, 1.0] # cone - obstacle
+ }
+ min_range: -64.0
+ max_range: 64.0
diff --git a/projects/PCSegSAM2/gdino_checkpoints/download_ckpts.sh b/projects/PCSegSAM2/gdino_checkpoints/download_ckpts.sh
new file mode 100644
index 00000000..c6dee178
--- /dev/null
+++ b/projects/PCSegSAM2/gdino_checkpoints/download_ckpts.sh
@@ -0,0 +1,31 @@
+#!/bin/bash
+
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+# Use either wget or curl to download the checkpoints
+if command -v wget &> /dev/null; then
+ CMD="wget"
+elif command -v curl &> /dev/null; then
+ CMD="curl -L -O -k"
+else
+ echo "Please install wget or curl to download the checkpoints."
+ exit 1
+fi
+
+# Define the URLs for the checkpoints
+BASE_URL="https://github.com/IDEA-Research/GroundingDINO/releases/download/"
+swint_ogc_url="${BASE_URL}v0.1.0-alpha/groundingdino_swint_ogc.pth"
+swinb_cogcoor_url="${BASE_URL}v0.1.0-alpha2/groundingdino_swinb_cogcoor.pth"
+
+# Download each of the two checkpoints
+echo "Downloading groundingdino_swint_ogc.pth checkpoint..."
+$CMD $swint_ogc_url || { echo "Failed to download checkpoint from $swint_ogc_url"; exit 1; }
+
+echo "Downloading groundingdino_swinb_cogcoor.pth checkpoint..."
+$CMD $swinb_cogcoor_url || { echo "Failed to download checkpoint from $swinb_cogcoor_url"; exit 1; }
+
+echo "All checkpoints are downloaded successfully."
diff --git a/projects/PCSegSAM2/generate_segmentation_videos.py b/projects/PCSegSAM2/generate_segmentation_videos.py
new file mode 100644
index 00000000..f77068e5
--- /dev/null
+++ b/projects/PCSegSAM2/generate_segmentation_videos.py
@@ -0,0 +1,297 @@
+import argparse
+import logging
+import os
+import os.path as osp
+import re
+import warnings
+from pathlib import Path
+from typing import Any, Dict, List, Optional
+
+import matplotlib
+
+matplotlib.use("Agg")
+import concurrent.futures
+
+import cv2
+import matplotlib.colors as mcolors
+import matplotlib.pyplot as plt
+import numpy as np
+import yaml
+from mmengine.config import Config
+from t4_devkit import Tier4
+from t4_devkit.schema import Sample
+from tqdm import tqdm
+
+from tools.detection3d.t4dataset_converters.t4converter import (
+ extract_tier4_data,
+)
+
+
+def get_lidar_token(sample_rec: Sample) -> Optional[str]:
+ data_dict = sample_rec.data
+ if "LIDAR_TOP" in data_dict:
+ return data_dict["LIDAR_TOP"]
+ elif "LIDAR_CONCAT" in data_dict:
+ return data_dict["LIDAR_CONCAT"]
+ else:
+ return None
+
+
+def get_scene_root_dir_path(
+ root_path: str,
+ dataset_version: str,
+ scene_id: str,
+) -> str:
+ """
+ This function checks if the provided `scene_root_dir_path` follows the new directory structure
+ of the T4 Dataset, which should look like `$T4DATASET_VERSION/$T4DATASET_ID/$VERSION_ID/`.
+ If the `scene_root_dir_path` does contain a version directory, it searches for the latest version directory
+ under the `scene_root_dir_path` and returns the updated path.
+ If no version directory is found, it prints a deprecation warning and returns the original `scene_root_dir_path`.
+
+ Args:
+ root_path (str): The root path of the T4 Dataset.
+ dataset_version (str): The dataset version like 'db_jpntaxi_v2'
+ scene_id: The scene id token.
+ Returns:
+ str: The updated path containing the version directory if it exists,
+ otherwise the original `scene_root_dir_path`.
+ """
+ # an integer larger than or equal to 0
+ version_pattern = re.compile(r"^\d+$")
+
+ scene_root_dir_path = osp.join(root_path, dataset_version, scene_id)
+
+ version_dirs = [d for d in os.listdir(scene_root_dir_path) if version_pattern.match(d)]
+
+ if version_dirs:
+ version_id = sorted(version_dirs, key=int)[-1]
+ return os.path.join(scene_root_dir_path, version_id)
+ else:
+ warnings.simplefilter("always")
+ warnings.warn(
+ f"The directory structure of T4 Dataset is deprecated. In the newer version, the directory structure should look something like `$T4DATASET_ID/$VERSION_ID/`. Please update your Web.Auto CLI to the latest version.",
+ DeprecationWarning,
+ )
+ return scene_root_dir_path
+
+
+def create_scatter_figure(pointcloud, seg, cmap, min_range, max_range, marker_size=1):
+
+ x_lim = (min_range, max_range)
+ y_lim = (min_range, max_range)
+
+ fig, ax = plt.subplots(figsize=(12, 12))
+
+ x = pointcloud[:, 0]
+ y = pointcloud[:, 1]
+
+    ax.scatter(x, y, c=seg, cmap=cmap, s=marker_size, vmin=0, vmax=255)  # pin normalization so label values index the 256-entry colormap
+
+ ax.set_xlim(x_lim)
+ ax.set_ylim(y_lim)
+ ax.set_xlabel("x")
+ ax.set_ylabel("y")
+ ax.set_title("BEV Seg")
+ ax.set_aspect("equal", adjustable="box")
+
+ fig.tight_layout()
+
+ return fig, ax
+
+
+def get_frame_from_fig(fig):
+
+ fig.canvas.draw()
+
+    img = np.frombuffer(fig.canvas.buffer_rgba(), dtype=np.uint8)  # RGBA byte buffer from the Agg canvas
+    img = img.reshape(fig.canvas.get_width_height()[::-1] + (4,))  # (height, width, 4)
+    img = img[:, :, 0:3]  # drop the alpha channel, keeping RGB
+
+ return img
+
+
+def generate_bev_segmentation(
+ root_path: str,
+ cfg: Any,
+ segmentation_cfg: Any,
+ t4: Tier4,
+ sample: Sample,
+ cmap: List,
+):
+ lidar_token = get_lidar_token(sample)
+ if lidar_token is None:
+ logging.warning(
+ f"sample {sample['token']} doesn't have lidar",
+ )
+ return
+ (
+ pose_record,
+ cs_record,
+ sd_record,
+ scene_record,
+ log_record,
+ boxes,
+ lidar_path,
+ e2g_r_mat,
+ l2e_r_mat,
+ e2g_t,
+ l2e_t,
+ ) = extract_tier4_data(t4, sample, lidar_token)
+
+ lidar_l2e_transform = np.eye(4, dtype=np.float32)
+ lidar_l2e_transform[0:3, 0:3] = l2e_r_mat
+ lidar_l2e_transform[0:3, 3] = l2e_t
+
+ # Load points
+ points = np.fromfile(str(lidar_path), dtype=np.float32, count=-1).reshape([-1, 5])
+ num_points = points.shape[0]
+ points_lcs = np.hstack([points[:, 0:3], np.ones((num_points, 1))])
+
+ points_ecs = points_lcs @ lidar_l2e_transform.T
+
+ lidar_path = Path(lidar_path)
+ basename = lidar_path.name.split(".")[0]
+ seg_path = lidar_path.parent / f"{basename}_seg.npy"
+
+ seg = np.load(str(seg_path))
+
+ min_range = segmentation_cfg["visualization"]["min_range"]
+ max_range = segmentation_cfg["visualization"]["max_range"]
+
+ fig, ax = create_scatter_figure(points_ecs, seg, cmap, min_range, max_range)
+
+ bev_img = get_frame_from_fig(fig)
+
+ plt.close(fig)
+
+ return bev_img
+
+
+def generate_videos_scene(args, cfg, segmentation_cfg, dataset_version, custom_cmap, scene_id):
+
+ logging.info(f"Creating video for scene: {scene_id}")
+ scene_root_dir_path = get_scene_root_dir_path(
+ args.root_path,
+ dataset_version,
+ scene_id,
+ )
+
+ if not osp.isdir(scene_root_dir_path):
+ raise ValueError(f"{scene_root_dir_path} does not exist.")
+
+ t4 = Tier4(version="annotation", data_root=scene_root_dir_path, verbose=False)
+
+ bev_images = []
+
+    for sample in tqdm(t4.sample):
+        bev_img = generate_bev_segmentation(args.root_path, cfg, segmentation_cfg, t4, sample, custom_cmap)
+        if bev_img is not None:  # samples without a lidar sweep are skipped
+            bev_images.append(bev_img)
+
+ generate_video(args.out_videos, scene_id, bev_images)
+
+
+def generate_videos_scene_wrapper(args):
+ return generate_videos_scene(*args)
+
+
+def parse_args():
+    parser = argparse.ArgumentParser(description="Generate BEV segmentation videos for T4dataset")
+
+ parser.add_argument(
+ "--dataset_config",
+ type=str,
+ required=True,
+ help="config for T4dataset",
+ )
+
+ parser.add_argument(
+ "--segmentation_config",
+ type=str,
+ required=True,
+ help="segmentation config",
+ )
+
+ parser.add_argument(
+ "--root_path",
+ type=str,
+ required=True,
+ help="specify the root path of dataset",
+ )
+
+ parser.add_argument(
+ "--out_videos",
+ type=str,
+ required=True,
+ help="directory to save segmented videos",
+ )
+
+ args = parser.parse_args()
+ return args
+
+
+def generate_video(video_folder, scene_id, images):
+
+ if len(images) == 0:
+ logging.info("Empty list. Already processed (?)")
+ return
+
+ height, width, layers = images[0].shape
+
+ output_file = Path(video_folder) / f"{scene_id}_bev_seg.mp4"
+ fps = 2
+    fourcc = cv2.VideoWriter_fourcc(*"mp4v")
+
+    video_writer = cv2.VideoWriter(str(output_file), fourcc, fps, (width, height))
+
+    for image in images:
+        image = cv2.resize(image, (width, height))
+        video_writer.write(cv2.cvtColor(image, cv2.COLOR_RGB2BGR))  # matplotlib frames are RGB; cv2 expects BGR
+
+ video_writer.release()
+ logging.info(f"Video created successfully: {output_file}")
+
+
+def main():
+ args = parse_args()
+
+ logging.basicConfig(level=logging.INFO)
+
+ # load config
+ cfg = Config.fromfile(args.dataset_config)
+ os.makedirs(args.out_videos, exist_ok=True)
+
+ with open(args.segmentation_config, "r") as f:
+ segmentation_cfg = yaml.safe_load(f)
+
+ # TODO(knzo25): hack since I only want to test part of the db
+ cfg.dataset_version_list = ["db_jpntaxi_v2"]
+
+ # Create cmap
+ cmap_dict = segmentation_cfg["visualization"]["color_map"]
+ cmap_list = [cmap_dict[i] if i in cmap_dict else [0.0, 0.0, 0.0] for i in range(0, 256)]
+ custom_cmap = mcolors.ListedColormap(cmap_list)
+
+ num_workers = segmentation_cfg["projective_segmentation"]["num_workers"]
+
+ for dataset_version in cfg.dataset_version_list:
+ dataset_list = osp.join(cfg.dataset_version_config_root, dataset_version + ".yaml")
+
+ with open(dataset_list, "r") as f:
+ dataset_list_dict: Dict[str, List[str]] = yaml.safe_load(f)
+
+ for split in ["train", "val", "test"]:
+ logging.info(f"Creating videos from split: {split}")
+
+ scenes_list = dataset_list_dict.get(split, [])
+ pool_args = [
+ (args, cfg, segmentation_cfg, dataset_version, custom_cmap, scene_id) for scene_id in scenes_list
+ ]
+
+ with concurrent.futures.ProcessPoolExecutor(max_workers=num_workers) as executor:
+ executor.map(generate_videos_scene_wrapper, pool_args)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/projects/PCSegSAM2/grounding_dino/.gitignore b/projects/PCSegSAM2/grounding_dino/.gitignore
new file mode 100644
index 00000000..60716be3
--- /dev/null
+++ b/projects/PCSegSAM2/grounding_dino/.gitignore
@@ -0,0 +1,146 @@
+# IDE
+.idea/
+.vscode/
+
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+pip-wheel-metadata/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+.python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# vscode
+.vscode/
+output/
+outputs/
+subs/
+logs/
+
+grounding/config/configs
+grounding/version.py
+
+vis/
+tmp/
diff --git a/projects/PCSegSAM2/grounding_dino/LICENSE b/projects/PCSegSAM2/grounding_dino/LICENSE
new file mode 100644
index 00000000..f1460f5e
--- /dev/null
+++ b/projects/PCSegSAM2/grounding_dino/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2023 - present, IDEA Research.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/projects/PCSegSAM2/grounding_dino/README.md b/projects/PCSegSAM2/grounding_dino/README.md
new file mode 100644
index 00000000..489989b0
--- /dev/null
+++ b/projects/PCSegSAM2/grounding_dino/README.md
@@ -0,0 +1,366 @@
+
+
+# :sauropod: Grounding DINO
+
+[PWC: Zero-Shot Object Detection on MSCOCO](https://paperswithcode.com/sota/zero-shot-object-detection-on-mscoco?p=grounding-dino-marrying-dino-with-grounded) [PWC: Zero-Shot Object Detection on ODinW](https://paperswithcode.com/sota/zero-shot-object-detection-on-odinw?p=grounding-dino-marrying-dino-with-grounded) \
+[PWC: Object Detection on COCO minival](https://paperswithcode.com/sota/object-detection-on-coco-minival?p=grounding-dino-marrying-dino-with-grounded) [PWC: Object Detection on COCO](https://paperswithcode.com/sota/object-detection-on-coco?p=grounding-dino-marrying-dino-with-grounded)
+
+
+**[IDEA-CVR, IDEA-Research](https://github.com/IDEA-Research)**
+
+[Shilong Liu](http://www.lsl.zone/), [Zhaoyang Zeng](https://scholar.google.com/citations?user=U_cvvUwAAAAJ&hl=zh-CN&oi=ao), [Tianhe Ren](https://rentainhe.github.io/), [Feng Li](https://scholar.google.com/citations?user=ybRe9GcAAAAJ&hl=zh-CN), [Hao Zhang](https://scholar.google.com/citations?user=B8hPxMQAAAAJ&hl=zh-CN), [Jie Yang](https://github.com/yangjie-cv), [Chunyuan Li](https://scholar.google.com/citations?user=Zd7WmXUAAAAJ&hl=zh-CN&oi=ao), [Jianwei Yang](https://jwyang.github.io/), [Hang Su](https://scholar.google.com/citations?hl=en&user=dxN1_X0AAAAJ&view_op=list_works&sortby=pubdate), [Jun Zhu](https://scholar.google.com/citations?hl=en&user=axsP38wAAAAJ), [Lei Zhang](https://www.leizhang.org/):email:.
+
+
+[[`Paper`](https://arxiv.org/abs/2303.05499)] [[`Demo`](https://huggingface.co/spaces/ShilongLiu/Grounding_DINO_demo)] [[`BibTex`](#black_nib-citation)]
+
+
+PyTorch implementation and pretrained models for Grounding DINO. For details, see the paper **[Grounding DINO: Marrying DINO with Grounded Pre-Training for Open-Set Object Detection](https://arxiv.org/abs/2303.05499)**.
+
+- 🔥 **[Grounding DINO 1.5](https://github.com/IDEA-Research/Grounding-DINO-1.5-API)** has been released: IDEA Research's **most capable** open-world object detection model!
+- 🔥 **[Grounding DINO](https://arxiv.org/abs/2303.05499)** and **[Grounded SAM](https://arxiv.org/abs/2401.14159)** are now supported in Hugging Face. For more convenient use, you can refer to [this documentation](https://huggingface.co/docs/transformers/model_doc/grounding-dino).
+
+## :sun_with_face: Helpful Tutorial
+
+- :grapes: [[Read our arXiv Paper](https://arxiv.org/abs/2303.05499)]
+- :apple: [[Watch our simple introduction video on YouTube](https://youtu.be/wxWDt5UiwY8)]
+- :blossom: [[Try the Colab Demo](https://colab.research.google.com/github/roboflow-ai/notebooks/blob/main/notebooks/zero-shot-object-detection-with-grounding-dino.ipynb)]
+- :sunflower: [[Try our Official Huggingface Demo](https://huggingface.co/spaces/ShilongLiu/Grounding_DINO_demo)]
+- :maple_leaf: [[Watch the Step by Step Tutorial about GroundingDINO by Roboflow AI](https://youtu.be/cMa77r3YrDk)]
+- :mushroom: [[GroundingDINO: Automated Dataset Annotation and Evaluation by Roboflow AI](https://youtu.be/C4NqaRBz_Kw)]
+- :hibiscus: [[Accelerate Image Annotation with SAM and GroundingDINO by Roboflow AI](https://youtu.be/oEQYStnF2l8)]
+- :white_flower: [[Autodistill: Train YOLOv8 with ZERO Annotations based on Grounding-DINO and Grounded-SAM by Roboflow AI](https://github.com/autodistill/autodistill)]
+
+
+
+
+
+
+## :sparkles: Highlight Projects
+
+- [Semantic-SAM: a universal image segmentation model to enable segment and recognize anything at any desired granularity](https://github.com/UX-Decoder/Semantic-SAM)
+- [DetGPT: Detect What You Need via Reasoning](https://github.com/OptimalScale/DetGPT)
+- [Grounded-SAM: Marrying Grounding DINO with Segment Anything](https://github.com/IDEA-Research/Grounded-Segment-Anything)
+- [Grounding DINO with Stable Diffusion](demo/image_editing_with_groundingdino_stablediffusion.ipynb)
+- [Grounding DINO with GLIGEN for Controllable Image Editing](demo/image_editing_with_groundingdino_gligen.ipynb)
+- [OpenSeeD: A Simple and Strong Openset Segmentation Model](https://github.com/IDEA-Research/OpenSeeD)
+- [SEEM: Segment Everything Everywhere All at Once](https://github.com/UX-Decoder/Segment-Everything-Everywhere-All-At-Once)
+- [X-GPT: Conversational Visual Agent supported by X-Decoder](https://github.com/microsoft/X-Decoder/tree/xgpt)
+- [GLIGEN: Open-Set Grounded Text-to-Image Generation](https://github.com/gligen/GLIGEN)
+- [LLaVA: Large Language and Vision Assistant](https://github.com/haotian-liu/LLaVA)
+
+
+
+
+
+
+
+
+## :bulb: Highlight
+
+- **Open-Set Detection.** Detect **everything** with language!
+- **High Performance.** COCO zero-shot **52.5 AP** (training without COCO data!). COCO fine-tune **63.0 AP**.
+- **Flexible.** Collaborates with Stable Diffusion for image editing.
+
+
+
+
+## :fire: News
+- **`2023/07/18`**: We release [Semantic-SAM](https://github.com/UX-Decoder/Semantic-SAM), a universal image segmentation model to enable segment and recognize anything at any desired granularity. **Code** and **checkpoint** are available!
+- **`2023/06/17`**: We provide an example to evaluate Grounding DINO on COCO zero-shot performance.
+- **`2023/04/15`**: Refer to [CV in the Wild Readings](https://github.com/Computer-Vision-in-the-Wild/CVinW_Readings) for those who are interested in open-set recognition!
+- **`2023/04/08`**: We release [demos](demo/image_editing_with_groundingdino_gligen.ipynb) combining [Grounding DINO](https://arxiv.org/abs/2303.05499) with [GLIGEN](https://github.com/gligen/GLIGEN) for more controllable image editing.
+- **`2023/04/08`**: We release [demos](demo/image_editing_with_groundingdino_stablediffusion.ipynb) combining [Grounding DINO](https://arxiv.org/abs/2303.05499) with [Stable Diffusion](https://github.com/Stability-AI/StableDiffusion) for image editing.
+- **`2023/04/06`**: We build a new demo by marrying GroundingDINO with [Segment-Anything](https://github.com/facebookresearch/segment-anything), named **[Grounded-Segment-Anything](https://github.com/IDEA-Research/Grounded-Segment-Anything)**, which aims to support segmentation in GroundingDINO.
+- **`2023/03/28`**: A YouTube [video](https://youtu.be/cMa77r3YrDk) about Grounding DINO and basic object detection prompt engineering. [[SkalskiP](https://github.com/SkalskiP)]
+- **`2023/03/28`**: Add a [demo](https://huggingface.co/spaces/ShilongLiu/Grounding_DINO_demo) on Hugging Face Space!
+- **`2023/03/27`**: Support CPU-only mode. Now the model can run on machines without GPUs.
+- **`2023/03/25`**: A [demo](https://colab.research.google.com/github/roboflow-ai/notebooks/blob/main/notebooks/zero-shot-object-detection-with-grounding-dino.ipynb) for Grounding DINO is available at Colab. [[SkalskiP](https://github.com/SkalskiP)]
+- **`2023/03/22`**: Code is available now!
+
+
+
+**Description.** Paper introduction figure: Marrying Grounding DINO and GLIGEN.
+
+
+
+## :star: Explanations/Tips for Grounding DINO Inputs and Outputs
+- Grounding DINO accepts an `(image, text)` pair as input.
+- It outputs `900` (by default) object boxes. Each box has similarity scores across all input words (as shown in the figures below).
+- By default, we choose the boxes whose highest similarities are above a `box_threshold`.
+- We extract the words whose similarities are higher than the `text_threshold` as predicted labels.
+- If you want to obtain objects of specific phrases, like the `dogs` in the sentence `two dogs with a stick.`, you can select the boxes with the highest text similarities to `dogs` as the final outputs.
+- Note that each word can be split into **more than one** token by different tokenizers, so the number of words in a sentence may not equal the number of text tokens.
+- We suggest separating different category names with `.` for Grounding DINO. A sketch of the thresholding logic follows this list.
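+
+A minimal sketch of the box/text thresholding described above (`logits` and `boxes` are illustrative tensor names, not the repository's exact internals):
+
+```python
+import torch
+
+# logits: (num_queries, num_tokens) word similarities; boxes: (num_queries, 4)
+logits = torch.rand(900, 256)
+boxes = torch.rand(900, 4)
+box_threshold, text_threshold = 0.35, 0.25
+
+# keep boxes whose best word similarity exceeds box_threshold
+keep = logits.max(dim=1).values > box_threshold
+logits, boxes = logits[keep], boxes[keep]
+
+# for each kept box, the words above text_threshold form the predicted label
+for per_box_logits in logits:
+    word_ids = (per_box_logits > text_threshold).nonzero(as_tuple=True)[0]
+    # map word_ids back to words with the tokenizer to obtain the phrase
+```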
+
+
+
+## :label: TODO
+
+- [x] Release inference code and demo.
+- [x] Release checkpoints.
+- [x] Grounding DINO with Stable Diffusion and GLIGEN demos.
+- [ ] Release training codes.
+
+## :hammer_and_wrench: Install
+
+**Note:**
+
+0. If you have a CUDA environment, please make sure the environment variable `CUDA_HOME` is set. The package will be compiled in CPU-only mode if CUDA is not available.
+
+Please follow the installation steps strictly; otherwise the program may produce:
+```bash
+NameError: name '_C' is not defined
+```
+
+If this happens, please reinstall GroundingDINO by re-cloning the repository and running all of the installation steps again.
+
+#### How to check CUDA:
+```bash
+echo $CUDA_HOME
+```
+If it prints nothing, then you haven't set the path yet.
+
+Run this to set the environment variable in your current shell:
+```bash
+export CUDA_HOME=/path/to/cuda-11.3
+```
+
+Note that the CUDA version should match your CUDA runtime, since multiple CUDA versions may coexist on the same machine.
+
+If you want to set `CUDA_HOME` permanently, store it in your shell profile:
+
+```bash
+echo 'export CUDA_HOME=/path/to/cuda' >> ~/.bashrc
+```
+After that, source the bashrc file and check `CUDA_HOME`:
+```bash
+source ~/.bashrc
+echo $CUDA_HOME
+```
+
+In this example, `/path/to/cuda-11.3` should be replaced with the path where your CUDA toolkit is installed. You can find it by typing **which nvcc** in your terminal.
+
+For instance, if the output is `/usr/local/cuda/bin/nvcc`, then:
+```bash
+export CUDA_HOME=/usr/local/cuda
+```
+**Installation:**
+
+1. Clone the GroundingDINO repository from GitHub.
+
+```bash
+git clone https://github.com/IDEA-Research/GroundingDINO.git
+```
+
+2. Change the current directory to the GroundingDINO folder.
+
+```bash
+cd GroundingDINO/
+```
+
+3. Install the required dependencies in the current directory.
+
+```bash
+pip install -e .
+```
+
+4. Download pre-trained model weights.
+
+```bash
+mkdir weights
+cd weights
+wget -q https://github.com/IDEA-Research/GroundingDINO/releases/download/v0.1.0-alpha/groundingdino_swint_ogc.pth
+cd ..
+```
+
+## :arrow_forward: Demo
+Check your GPU ID (only if you're using a GPU)
+
+```bash
+nvidia-smi
+```
+Replace `{GPU ID}`, `image_you_want_to_detect.jpg`, and `"dir you want to save the output"` with appropriate values in the following command:
+```bash
+CUDA_VISIBLE_DEVICES={GPU ID} python demo/inference_on_a_image.py \
+-c groundingdino/config/GroundingDINO_SwinT_OGC.py \
+-p weights/groundingdino_swint_ogc.pth \
+-i image_you_want_to_detect.jpg \
+-o "dir you want to save the output" \
+-t "chair"
+ [--cpu-only] # add this flag for CPU-only mode
+```
+
+If you would like to specify the phrases to detect, here is a demo:
+```bash
+CUDA_VISIBLE_DEVICES={GPU ID} python demo/inference_on_a_image.py \
+-c groundingdino/config/GroundingDINO_SwinT_OGC.py \
+-p ./groundingdino_swint_ogc.pth \
+-i .asset/cat_dog.jpeg \
+-o logs/1111 \
+-t "There is a cat and a dog in the image ." \
+--token_spans "[[[9, 10], [11, 14]], [[19, 20], [21, 24]]]"
+ [--cpu-only] # add this flag for CPU-only mode
+```
+The `token_spans` argument specifies the start and end character positions of each phrase. For example, the first phrase is `[[9, 10], [11, 14]]`: `"There is a cat and a dog in the image ."[9:10]` is `'a'` and `"There is a cat and a dog in the image ."[11:14]` is `'cat'`, so it refers to the phrase `a cat`. Similarly, `[[19, 20], [21, 24]]` refers to the phrase `a dog`.
+
+See `demo/inference_on_a_image.py` for more details.
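+
+If computing character offsets by hand is error-prone, a small helper can derive `token_spans` from the caption. Note that `find_token_spans` is a hypothetical utility, not part of this repository:
+
+```python
+def find_token_spans(caption: str, phrase: str):
+    """Return [[start, end], ...] character spans for each word of `phrase` in `caption`."""
+    start = caption.find(phrase)
+    assert start != -1, f"{phrase!r} not found in caption"
+    spans, offset = [], start
+    for word in phrase.split():
+        word_start = caption.index(word, offset)
+        spans.append([word_start, word_start + len(word)])
+        offset = word_start + len(word)
+    return spans
+
+caption = "There is a cat and a dog in the image ."
+print(find_token_spans(caption, "a cat"))  # [[9, 10], [11, 14]]
+print(find_token_spans(caption, "a dog"))  # [[19, 20], [21, 24]]
+```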
+
+**Running with Python:**
+
+```python
+from groundingdino.util.inference import load_model, load_image, predict, annotate
+import cv2
+
+model = load_model("groundingdino/config/GroundingDINO_SwinT_OGC.py", "weights/groundingdino_swint_ogc.pth")
+IMAGE_PATH = "weights/dog-3.jpeg"
+TEXT_PROMPT = "chair . person . dog ."
+BOX_THRESHOLD = 0.35
+TEXT_THRESHOLD = 0.25
+
+image_source, image = load_image(IMAGE_PATH)
+
+boxes, logits, phrases = predict(
+ model=model,
+ image=image,
+ caption=TEXT_PROMPT,
+ box_threshold=BOX_THRESHOLD,
+ text_threshold=TEXT_THRESHOLD
+)
+
+annotated_frame = annotate(image_source=image_source, boxes=boxes, logits=logits, phrases=phrases)
+cv2.imwrite("annotated_image.jpg", annotated_frame)
+```
+**Web UI**
+
+We also provide demo code integrating Grounding DINO with a Gradio web UI. See the file `demo/gradio_app.py` for more details.
+
+**Notebooks**
+
+- We release [demos](demo/image_editing_with_groundingdino_gligen.ipynb) combining [Grounding DINO](https://arxiv.org/abs/2303.05499) with [GLIGEN](https://github.com/gligen/GLIGEN) for more controllable image editing.
+- We release [demos](demo/image_editing_with_groundingdino_stablediffusion.ipynb) combining [Grounding DINO](https://arxiv.org/abs/2303.05499) with [Stable Diffusion](https://github.com/Stability-AI/StableDiffusion) for image editing.
+
+## COCO Zero-shot Evaluations
+
+We provide an example to evaluate Grounding DINO zero-shot performance on COCO. The result should be **48.5** AP.
+
+```bash
+CUDA_VISIBLE_DEVICES=0 \
+python demo/test_ap_on_coco.py \
+ -c groundingdino/config/GroundingDINO_SwinT_OGC.py \
+ -p weights/groundingdino_swint_ogc.pth \
+ --anno_path /path/to/annotations/instances_val2017.json \
+ --image_dir /path/to/image_dir/val2017
+```
+
+
+## :luggage: Checkpoints
+
+| # | name | backbone | Data | box AP on COCO | Checkpoint | Config |
+|---|------|----------|------|----------------|------------|--------|
+| 1 | GroundingDINO-T | Swin-T | O365, GoldG, Cap4M | 48.4 (zero-shot) / 57.2 (fine-tune) | GitHub link / HF link | link |
+| 2 | GroundingDINO-B | Swin-B | COCO, O365, GoldG, Cap4M, OpenImage, ODinW-35, RefCOCO | 56.7 | GitHub link / HF link | link |
+
+## :medal_military: Results
+
+**COCO Object Detection Results** (figure)
+
+**ODinW Object Detection Results** (figure)
+
+**Marrying Grounding DINO with Stable Diffusion for Image Editing** (figure). See our example notebook for more details.
+
+**Marrying Grounding DINO with GLIGEN for more Detailed Image Editing** (figure). See our example notebook for more details.
+
+## :sauropod: Model: Grounding DINO
+
+The model includes a text backbone, an image backbone, a feature enhancer, a language-guided query selection module, and a cross-modality decoder.
+
+
+
+
+## :hearts: Acknowledgement
+
+Our model is related to [DINO](https://github.com/IDEA-Research/DINO) and [GLIP](https://github.com/microsoft/GLIP). Thanks for their great work!
+
+We also thank great previous work including DETR, Deformable DETR, SMCA, Conditional DETR, Anchor DETR, Dynamic DETR, DAB-DETR, DN-DETR, etc. More related work is available at [Awesome Detection Transformer](https://github.com/IDEACVR/awesome-detection-transformer). A new toolbox, [detrex](https://github.com/IDEA-Research/detrex), is available as well.
+
+Thanks [Stable Diffusion](https://github.com/Stability-AI/StableDiffusion) and [GLIGEN](https://github.com/gligen/GLIGEN) for their awesome models.
+
+
+## :black_nib: Citation
+
+If you find our work helpful for your research, please consider citing the following BibTeX entry.
+
+```bibtex
+@article{liu2023grounding,
+ title={Grounding dino: Marrying dino with grounded pre-training for open-set object detection},
+ author={Liu, Shilong and Zeng, Zhaoyang and Ren, Tianhe and Li, Feng and Zhang, Hao and Yang, Jie and Li, Chunyuan and Yang, Jianwei and Su, Hang and Zhu, Jun and others},
+ journal={arXiv preprint arXiv:2303.05499},
+ year={2023}
+}
+```
diff --git a/projects/PCSegSAM2/grounding_dino/environment.yaml b/projects/PCSegSAM2/grounding_dino/environment.yaml
new file mode 100644
index 00000000..3ac1937d
--- /dev/null
+++ b/projects/PCSegSAM2/grounding_dino/environment.yaml
@@ -0,0 +1,248 @@
+name: dino
+channels:
+ - pytorch
+ - nvidia
+ - conda-forge
+ - defaults
+dependencies:
+ - addict=2.4.0=pyhd8ed1ab_2
+ - aiohttp=3.8.5=py39ha55989b_0
+ - aiosignal=1.3.1=pyhd8ed1ab_0
+ - asttokens=2.0.5=pyhd3eb1b0_0
+ - async-timeout=4.0.3=pyhd8ed1ab_0
+ - attrs=23.1.0=pyh71513ae_1
+ - aws-c-auth=0.7.0=h6f3c987_2
+ - aws-c-cal=0.6.0=h6ba3258_0
+ - aws-c-common=0.8.23=hcfcfb64_0
+ - aws-c-compression=0.2.17=h420beca_1
+ - aws-c-event-stream=0.3.1=had47b81_1
+ - aws-c-http=0.7.11=h72ba615_0
+ - aws-c-io=0.13.28=ha35c040_0
+ - aws-c-mqtt=0.8.14=h4941efa_2
+ - aws-c-s3=0.3.13=he04eaa7_2
+ - aws-c-sdkutils=0.1.11=h420beca_1
+ - aws-checksums=0.1.16=h420beca_1
+ - aws-crt-cpp=0.20.3=h247a981_4
+ - aws-sdk-cpp=1.10.57=h1a0519f_17
+ - backcall=0.2.0=pyhd3eb1b0_0
+ - blas=2.118=mkl
+ - blas-devel=3.9.0=18_win64_mkl
+ - brotli=1.0.9=hcfcfb64_9
+ - brotli-bin=1.0.9=hcfcfb64_9
+ - brotli-python=1.0.9=py39h99910a6_9
+ - bzip2=1.0.8=h8ffe710_4
+ - c-ares=1.19.1=hcfcfb64_0
+ - ca-certificates=2023.08.22=haa95532_0
+ - certifi=2023.7.22=py39haa95532_0
+ - charset-normalizer=3.2.0=pyhd8ed1ab_0
+ - click=8.1.7=win_pyh7428d3b_0
+ - colorama=0.4.6=pyhd8ed1ab_0
+ - comm=0.1.2=py39haa95532_0
+ - contourpy=1.1.1=py39h1f6ef14_1
+ - cuda-cccl=12.2.140=0
+ - cuda-cudart=11.8.89=0
+ - cuda-cudart-dev=11.8.89=0
+ - cuda-cupti=11.8.87=0
+ - cuda-libraries=11.8.0=0
+ - cuda-libraries-dev=11.8.0=0
+ - cuda-nvrtc=11.8.89=0
+ - cuda-nvrtc-dev=11.8.89=0
+ - cuda-nvtx=11.8.86=0
+ - cuda-profiler-api=12.2.140=0
+ - cuda-runtime=11.8.0=0
+ - cycler=0.11.0=pyhd8ed1ab_0
+ - cython=3.0.0=py39h2bbff1b_0
+ - dataclasses=0.8=pyhc8e2a94_3
+ - datasets=2.14.5=pyhd8ed1ab_0
+ - debugpy=1.6.7=py39hd77b12b_0
+ - decorator=5.1.1=pyhd3eb1b0_0
+ - dill=0.3.7=pyhd8ed1ab_0
+ - exceptiongroup=1.0.4=py39haa95532_0
+ - executing=0.8.3=pyhd3eb1b0_0
+ - filelock=3.12.4=pyhd8ed1ab_0
+ - fonttools=4.42.1=py39ha55989b_0
+ - freeglut=3.2.2=h63175ca_2
+ - freetype=2.12.1=hdaf720e_2
+ - frozenlist=1.4.0=py39ha55989b_1
+ - fsspec=2023.6.0=pyh1a96a4e_0
+ - gettext=0.21.1=h5728263_0
+ - glib=2.78.0=h12be248_0
+ - glib-tools=2.78.0=h12be248_0
+ - gst-plugins-base=1.22.6=h001b923_1
+ - gstreamer=1.22.6=hb4038d2_1
+ - huggingface_hub=0.17.3=pyhd8ed1ab_0
+ - icu=70.1=h0e60522_0
+ - idna=3.4=pyhd8ed1ab_0
+ - importlib-metadata=6.8.0=pyha770c72_0
+ - importlib-resources=6.1.0=pyhd8ed1ab_0
+ - importlib_metadata=6.8.0=hd8ed1ab_0
+ - importlib_resources=6.1.0=pyhd8ed1ab_0
+ - intel-openmp=2023.2.0=h57928b3_49503
+ - ipykernel=6.25.0=py39h9909e9c_0
+ - ipython=8.15.0=py39haa95532_0
+ - jasper=2.0.33=hc2e4405_1
+ - jedi=0.18.1=py39haa95532_1
+ - jinja2=3.1.2=pyhd8ed1ab_1
+ - joblib=1.3.2=pyhd8ed1ab_0
+ - jpeg=9e=hcfcfb64_3
+ - jupyter_client=8.1.0=py39haa95532_0
+ - jupyter_core=5.3.0=py39haa95532_0
+ - kiwisolver=1.4.5=py39h1f6ef14_1
+ - krb5=1.20.1=heb0366b_0
+ - lcms2=2.14=h90d422f_0
+ - lerc=4.0.0=h63175ca_0
+ - libabseil=20230125.3=cxx17_h63175ca_0
+ - libarrow=12.0.1=h12e5d06_5_cpu
+ - libblas=3.9.0=18_win64_mkl
+ - libbrotlicommon=1.0.9=hcfcfb64_9
+ - libbrotlidec=1.0.9=hcfcfb64_9
+ - libbrotlienc=1.0.9=hcfcfb64_9
+ - libcblas=3.9.0=18_win64_mkl
+ - libclang=15.0.7=default_h77d9078_3
+ - libclang13=15.0.7=default_h77d9078_3
+ - libcrc32c=1.1.2=h0e60522_0
+ - libcublas=11.11.3.6=0
+ - libcublas-dev=11.11.3.6=0
+ - libcufft=10.9.0.58=0
+ - libcufft-dev=10.9.0.58=0
+ - libcurand=10.3.3.141=0
+ - libcurand-dev=10.3.3.141=0
+ - libcurl=8.1.2=h68f0423_0
+ - libcusolver=11.4.1.48=0
+ - libcusolver-dev=11.4.1.48=0
+ - libcusparse=11.7.5.86=0
+ - libcusparse-dev=11.7.5.86=0
+ - libdeflate=1.14=hcfcfb64_0
+ - libevent=2.1.12=h3671451_1
+ - libffi=3.4.2=h8ffe710_5
+ - libglib=2.78.0=he8f3873_0
+ - libgoogle-cloud=2.12.0=h00b2bdc_1
+ - libgrpc=1.54.3=ha177ca7_0
+ - libhwloc=2.9.3=default_haede6df_1009
+ - libiconv=1.17=h8ffe710_0
+ - liblapack=3.9.0=18_win64_mkl
+ - liblapacke=3.9.0=18_win64_mkl
+ - libnpp=11.8.0.86=0
+ - libnpp-dev=11.8.0.86=0
+ - libnvjpeg=11.9.0.86=0
+ - libnvjpeg-dev=11.9.0.86=0
+ - libogg=1.3.4=h8ffe710_1
+ - libopencv=4.5.3=py39h488c12c_8
+ - libpng=1.6.39=h19919ed_0
+ - libprotobuf=3.21.12=h12be248_2
+ - libsodium=1.0.18=h62dcd97_0
+ - libsqlite=3.43.0=hcfcfb64_0
+ - libssh2=1.11.0=h7dfc565_0
+ - libthrift=0.18.1=h06f6336_2
+ - libtiff=4.4.0=hc4f729c_5
+ - libutf8proc=2.8.0=h82a8f57_0
+ - libuv=1.44.2=hcfcfb64_1
+ - libvorbis=1.3.7=h0e60522_0
+ - libwebp-base=1.3.2=hcfcfb64_0
+ - libxcb=1.13=hcd874cb_1004
+ - libxml2=2.11.5=hc3477c8_1
+ - libzlib=1.2.13=hcfcfb64_5
+ - lz4-c=1.9.4=hcfcfb64_0
+ - m2w64-gcc-libgfortran=5.3.0=6
+ - m2w64-gcc-libs=5.3.0=7
+ - m2w64-gcc-libs-core=5.3.0=7
+ - m2w64-gmp=6.1.0=2
+ - m2w64-libwinpthread-git=5.0.0.4634.697f757=2
+ - markupsafe=2.1.3=py39ha55989b_1
+ - matplotlib-base=3.8.0=py39hf19769e_1
+ - matplotlib-inline=0.1.6=py39haa95532_0
+ - mkl=2022.1.0=h6a75c08_874
+ - mkl-devel=2022.1.0=h57928b3_875
+ - mkl-include=2022.1.0=h6a75c08_874
+ - mpmath=1.3.0=pyhd8ed1ab_0
+ - msys2-conda-epoch=20160418=1
+ - multidict=6.0.4=py39ha55989b_0
+ - multiprocess=0.70.15=py39ha55989b_1
+ - munkres=1.1.4=pyh9f0ad1d_0
+ - nest-asyncio=1.5.6=py39haa95532_0
+ - networkx=3.1=pyhd8ed1ab_0
+ - numpy=1.26.0=py39hddb5d58_0
+ - opencv=4.5.3=py39hcbf5309_8
+ - openjpeg=2.5.0=hc9384bd_1
+ - openssl=3.1.3=hcfcfb64_0
+ - orc=1.9.0=hada7b9e_1
+ - packaging=23.1=pyhd8ed1ab_0
+ - pandas=2.1.1=py39h32e6231_0
+ - parso=0.8.3=pyhd3eb1b0_0
+ - pcre2=10.40=h17e33f8_0
+ - pickleshare=0.7.5=pyhd3eb1b0_1003
+ - pillow=9.2.0=py39h595c93f_3
+ - pip=23.2.1=pyhd8ed1ab_0
+ - platformdirs=3.10.0=pyhd8ed1ab_0
+ - prompt-toolkit=3.0.36=py39haa95532_0
+ - psutil=5.9.0=py39h2bbff1b_0
+ - pthread-stubs=0.4=hcd874cb_1001
+ - pthreads-win32=2.9.1=hfa6e2cd_3
+ - pure_eval=0.2.2=pyhd3eb1b0_0
+ - py-opencv=4.5.3=py39h00e5391_8
+ - pyarrow=12.0.1=py39hca4e8af_5_cpu
+ - pycocotools=2.0.6=py39hc266a54_1
+ - pygments=2.15.1=py39haa95532_1
+ - pyparsing=3.1.1=pyhd8ed1ab_0
+ - pysocks=1.7.1=pyh0701188_6
+ - python=3.9.18=h4de0772_0_cpython
+ - python-dateutil=2.8.2=pyhd8ed1ab_0
+ - python-tzdata=2023.3=pyhd8ed1ab_0
+ - python-xxhash=3.3.0=py39ha55989b_1
+ - python_abi=3.9=4_cp39
+ - pytorch=2.0.1=py3.9_cuda11.8_cudnn8_0
+ - pytorch-cuda=11.8=h24eeafa_5
+ - pytorch-mutex=1.0=cuda
+ - pytz=2023.3.post1=pyhd8ed1ab_0
+ - pywin32=305=py39h2bbff1b_0
+ - pyyaml=6.0.1=py39ha55989b_1
+ - pyzmq=25.1.0=py39hd77b12b_0
+ - qt-main=5.15.8=h720456b_6
+ - re2=2023.03.02=hd4eee63_0
+ - regex=2023.8.8=py39ha55989b_1
+ - requests=2.31.0=pyhd8ed1ab_0
+ - sacremoses=0.0.53=pyhd8ed1ab_0
+ - safetensors=0.3.3=py39hf21820d_1
+ - setuptools=68.2.2=pyhd8ed1ab_0
+ - six=1.16.0=pyh6c4a22f_0
+ - snappy=1.1.10=hfb803bf_0
+ - stack_data=0.2.0=pyhd3eb1b0_0
+ - sympy=1.12=pyh04b8f61_3
+ - tbb=2021.10.0=h91493d7_1
+ - timm=0.9.7=pyhd8ed1ab_0
+ - tk=8.6.13=hcfcfb64_0
+ - tokenizers=0.13.3=py39hca44cb7_0
+ - tomli=2.0.1=pyhd8ed1ab_0
+ - tornado=6.3.2=py39h2bbff1b_0
+ - tqdm=4.66.1=pyhd8ed1ab_0
+ - traitlets=5.7.1=py39haa95532_0
+ - transformers=4.33.2=pyhd8ed1ab_0
+ - typing-extensions=4.8.0=hd8ed1ab_0
+ - typing_extensions=4.8.0=pyha770c72_0
+ - tzdata=2023c=h71feb2d_0
+ - ucrt=10.0.22621.0=h57928b3_0
+ - unicodedata2=15.0.0=py39ha55989b_1
+ - urllib3=2.0.5=pyhd8ed1ab_0
+ - vc=14.3=h64f974e_17
+ - vc14_runtime=14.36.32532=hdcecf7f_17
+ - vs2015_runtime=14.36.32532=h05e6639_17
+ - wcwidth=0.2.5=pyhd3eb1b0_0
+ - wheel=0.41.2=pyhd8ed1ab_0
+ - win_inet_pton=1.1.0=pyhd8ed1ab_6
+ - xorg-libxau=1.0.11=hcd874cb_0
+ - xorg-libxdmcp=1.1.3=hcd874cb_0
+ - xxhash=0.8.2=hcfcfb64_0
+ - xz=5.2.6=h8d14728_0
+ - yaml=0.2.5=h8ffe710_2
+ - yapf=0.40.1=pyhd8ed1ab_0
+ - yarl=1.9.2=py39ha55989b_0
+ - zeromq=4.3.4=hd77b12b_0
+ - zipp=3.17.0=pyhd8ed1ab_0
+ - zlib=1.2.13=hcfcfb64_5
+ - zstd=1.5.5=h12be248_0
+ - pip:
+ - opencv-python==4.8.0.76
+ - supervision==0.6.0
+ - torchaudio==2.0.2
+ - torchvision==0.15.2
+prefix: C:\Users\Makoto\miniconda3\envs\dino
diff --git a/projects/PCSegSAM2/grounding_dino/groundingdino/__init__.py b/projects/PCSegSAM2/grounding_dino/groundingdino/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/projects/PCSegSAM2/grounding_dino/groundingdino/config/GroundingDINO_SwinB_cfg.py b/projects/PCSegSAM2/grounding_dino/groundingdino/config/GroundingDINO_SwinB_cfg.py
new file mode 100644
index 00000000..f490c4bb
--- /dev/null
+++ b/projects/PCSegSAM2/grounding_dino/groundingdino/config/GroundingDINO_SwinB_cfg.py
@@ -0,0 +1,43 @@
+batch_size = 1
+modelname = "groundingdino"
+backbone = "swin_B_384_22k"
+position_embedding = "sine"
+pe_temperatureH = 20
+pe_temperatureW = 20
+return_interm_indices = [1, 2, 3]
+backbone_freeze_keywords = None
+enc_layers = 6
+dec_layers = 6
+pre_norm = False
+dim_feedforward = 2048
+hidden_dim = 256
+dropout = 0.0
+nheads = 8
+num_queries = 900
+query_dim = 4
+num_patterns = 0
+num_feature_levels = 4
+enc_n_points = 4
+dec_n_points = 4
+two_stage_type = "standard"
+two_stage_bbox_embed_share = False
+two_stage_class_embed_share = False
+transformer_activation = "relu"
+dec_pred_bbox_embed_share = True
+dn_box_noise_scale = 1.0
+dn_label_noise_ratio = 0.5
+dn_label_coef = 1.0
+dn_bbox_coef = 1.0
+embed_init_tgt = True
+dn_labelbook_size = 2000
+max_text_len = 256
+text_encoder_type = "bert-base-uncased"
+use_text_enhancer = True
+use_fusion_layer = True
+use_checkpoint = True
+use_transformer_ckpt = True
+use_text_cross_attention = True
+text_dropout = 0.0
+fusion_dropout = 0.0
+fusion_droppath = 0.1
+sub_sentence_present = True
diff --git a/projects/PCSegSAM2/grounding_dino/groundingdino/config/GroundingDINO_SwinT_OGC.py b/projects/PCSegSAM2/grounding_dino/groundingdino/config/GroundingDINO_SwinT_OGC.py
new file mode 100644
index 00000000..9158d5f6
--- /dev/null
+++ b/projects/PCSegSAM2/grounding_dino/groundingdino/config/GroundingDINO_SwinT_OGC.py
@@ -0,0 +1,43 @@
+batch_size = 1
+modelname = "groundingdino"
+backbone = "swin_T_224_1k"
+position_embedding = "sine"
+pe_temperatureH = 20
+pe_temperatureW = 20
+return_interm_indices = [1, 2, 3]
+backbone_freeze_keywords = None
+enc_layers = 6
+dec_layers = 6
+pre_norm = False
+dim_feedforward = 2048
+hidden_dim = 256
+dropout = 0.0
+nheads = 8
+num_queries = 900
+query_dim = 4
+num_patterns = 0
+num_feature_levels = 4
+enc_n_points = 4
+dec_n_points = 4
+two_stage_type = "standard"
+two_stage_bbox_embed_share = False
+two_stage_class_embed_share = False
+transformer_activation = "relu"
+dec_pred_bbox_embed_share = True
+dn_box_noise_scale = 1.0
+dn_label_noise_ratio = 0.5
+dn_label_coef = 1.0
+dn_bbox_coef = 1.0
+embed_init_tgt = True
+dn_labelbook_size = 2000
+max_text_len = 256
+text_encoder_type = "bert-base-uncased"
+use_text_enhancer = True
+use_fusion_layer = True
+use_checkpoint = True
+use_transformer_ckpt = True
+use_text_cross_attention = True
+text_dropout = 0.0
+fusion_dropout = 0.0
+fusion_droppath = 0.1
+sub_sentence_present = True
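
These config files are plain Python modules of hyperparameters. A minimal sketch of how they are consumed, assuming the standard `SLConfig` loader and checkpoint layout used elsewhere in the repository:

```python
import torch

from groundingdino.models import build_model
from groundingdino.util.misc import clean_state_dict
from groundingdino.util.slconfig import SLConfig

# load the config module into an args-like namespace
args = SLConfig.fromfile("groundingdino/config/GroundingDINO_SwinT_OGC.py")
args.device = "cuda" if torch.cuda.is_available() else "cpu"

model = build_model(args)  # dispatches on args.modelname == "groundingdino"
checkpoint = torch.load("weights/groundingdino_swint_ogc.pth", map_location="cpu")
model.load_state_dict(clean_state_dict(checkpoint["model"]), strict=False)
model.eval()
```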
diff --git a/projects/PCSegSAM2/grounding_dino/groundingdino/config/__init__.py b/projects/PCSegSAM2/grounding_dino/groundingdino/config/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/projects/PCSegSAM2/grounding_dino/groundingdino/datasets/__init__.py b/projects/PCSegSAM2/grounding_dino/groundingdino/datasets/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/projects/PCSegSAM2/grounding_dino/groundingdino/datasets/cocogrounding_eval.py b/projects/PCSegSAM2/grounding_dino/groundingdino/datasets/cocogrounding_eval.py
new file mode 100644
index 00000000..f548d677
--- /dev/null
+++ b/projects/PCSegSAM2/grounding_dino/groundingdino/datasets/cocogrounding_eval.py
@@ -0,0 +1,261 @@
+# ------------------------------------------------------------------------
+# Grounding DINO. Modified by Shilong Liu.
+# url: https://github.com/IDEA-Research/GroundingDINO
+# Copyright (c) 2023 IDEA. All Rights Reserved.
+# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
+# ------------------------------------------------------------------------
+# Copyright (c) Aishwarya Kamath & Nicolas Carion. Licensed under the Apache License 2.0. All Rights Reserved
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+"""
+COCO evaluator that works in distributed mode.
+
+Mostly copy-paste from https://github.com/pytorch/vision/blob/edfd5a7/references/detection/coco_eval.py
+The difference is that there is less copy-pasting from pycocotools
+at the end of the file, as Python 3 can suppress prints with contextlib.
+"""
+import contextlib
+import copy
+import os
+
+import numpy as np
+import pycocotools.mask as mask_util
+import torch
+from groundingdino.util.misc import all_gather
+from pycocotools.coco import COCO
+from pycocotools.cocoeval import COCOeval
+
+
+class CocoGroundingEvaluator(object):
+ def __init__(self, coco_gt, iou_types, useCats=True):
+ assert isinstance(iou_types, (list, tuple))
+ coco_gt = copy.deepcopy(coco_gt)
+ self.coco_gt = coco_gt
+
+ self.iou_types = iou_types
+ self.coco_eval = {}
+ for iou_type in iou_types:
+ self.coco_eval[iou_type] = COCOeval(coco_gt, iouType=iou_type)
+ self.coco_eval[iou_type].useCats = useCats
+
+ self.img_ids = []
+ self.eval_imgs = {k: [] for k in iou_types}
+ self.useCats = useCats
+
+ def update(self, predictions):
+ img_ids = list(np.unique(list(predictions.keys())))
+ self.img_ids.extend(img_ids)
+
+ for iou_type in self.iou_types:
+ results = self.prepare(predictions, iou_type)
+
+ # suppress pycocotools prints
+ with open(os.devnull, "w") as devnull:
+ with contextlib.redirect_stdout(devnull):
+ coco_dt = COCO.loadRes(self.coco_gt, results) if results else COCO()
+
+ coco_eval = self.coco_eval[iou_type]
+
+ coco_eval.cocoDt = coco_dt
+ coco_eval.params.imgIds = list(img_ids)
+ coco_eval.params.useCats = self.useCats
+ img_ids, eval_imgs = evaluate(coco_eval)
+
+ self.eval_imgs[iou_type].append(eval_imgs)
+
+ def synchronize_between_processes(self):
+ for iou_type in self.iou_types:
+ self.eval_imgs[iou_type] = np.concatenate(self.eval_imgs[iou_type], 2)
+ create_common_coco_eval(self.coco_eval[iou_type], self.img_ids, self.eval_imgs[iou_type])
+
+ def accumulate(self):
+ for coco_eval in self.coco_eval.values():
+ coco_eval.accumulate()
+
+ def summarize(self):
+ for iou_type, coco_eval in self.coco_eval.items():
+ print("IoU metric: {}".format(iou_type))
+ coco_eval.summarize()
+
+ def prepare(self, predictions, iou_type):
+ if iou_type == "bbox":
+ return self.prepare_for_coco_detection(predictions)
+ elif iou_type == "segm":
+ return self.prepare_for_coco_segmentation(predictions)
+ elif iou_type == "keypoints":
+ return self.prepare_for_coco_keypoint(predictions)
+ else:
+ raise ValueError("Unknown iou type {}".format(iou_type))
+
+ def prepare_for_coco_detection(self, predictions):
+ coco_results = []
+ for original_id, prediction in predictions.items():
+ if len(prediction) == 0:
+ continue
+
+ boxes = prediction["boxes"]
+ boxes = convert_to_xywh(boxes).tolist()
+ scores = prediction["scores"].tolist()
+ labels = prediction["labels"].tolist()
+
+ coco_results.extend(
+ [
+ {
+ "image_id": original_id,
+ "category_id": labels[k],
+ "bbox": box,
+ "score": scores[k],
+ }
+ for k, box in enumerate(boxes)
+ ]
+ )
+ return coco_results
+
+ def prepare_for_coco_segmentation(self, predictions):
+ coco_results = []
+ for original_id, prediction in predictions.items():
+ if len(prediction) == 0:
+ continue
+
+ scores = prediction["scores"]
+ labels = prediction["labels"]
+ masks = prediction["masks"]
+
+ masks = masks > 0.5
+
+ scores = prediction["scores"].tolist()
+ labels = prediction["labels"].tolist()
+
+ rles = [
+ mask_util.encode(np.array(mask[0, :, :, np.newaxis], dtype=np.uint8, order="F"))[0] for mask in masks
+ ]
+ for rle in rles:
+ rle["counts"] = rle["counts"].decode("utf-8")
+
+ coco_results.extend(
+ [
+ {
+ "image_id": original_id,
+ "category_id": labels[k],
+ "segmentation": rle,
+ "score": scores[k],
+ }
+ for k, rle in enumerate(rles)
+ ]
+ )
+ return coco_results
+
+ def prepare_for_coco_keypoint(self, predictions):
+ coco_results = []
+ for original_id, prediction in predictions.items():
+ if len(prediction) == 0:
+ continue
+
+ boxes = prediction["boxes"]
+ boxes = convert_to_xywh(boxes).tolist()
+ scores = prediction["scores"].tolist()
+ labels = prediction["labels"].tolist()
+ keypoints = prediction["keypoints"]
+ keypoints = keypoints.flatten(start_dim=1).tolist()
+
+ coco_results.extend(
+ [
+ {
+ "image_id": original_id,
+ "category_id": labels[k],
+ "keypoints": keypoint,
+ "score": scores[k],
+ }
+ for k, keypoint in enumerate(keypoints)
+ ]
+ )
+ return coco_results
+
+
+def convert_to_xywh(boxes):
+ xmin, ymin, xmax, ymax = boxes.unbind(1)
+ return torch.stack((xmin, ymin, xmax - xmin, ymax - ymin), dim=1)
+
+
+def merge(img_ids, eval_imgs):
+ all_img_ids = all_gather(img_ids)
+ all_eval_imgs = all_gather(eval_imgs)
+
+ merged_img_ids = []
+ for p in all_img_ids:
+ merged_img_ids.extend(p)
+
+ merged_eval_imgs = []
+ for p in all_eval_imgs:
+ merged_eval_imgs.append(p)
+
+ merged_img_ids = np.array(merged_img_ids)
+ merged_eval_imgs = np.concatenate(merged_eval_imgs, 2)
+
+ # keep only unique (and in sorted order) images
+ merged_img_ids, idx = np.unique(merged_img_ids, return_index=True)
+ merged_eval_imgs = merged_eval_imgs[..., idx]
+
+ return merged_img_ids, merged_eval_imgs
+
+
+def create_common_coco_eval(coco_eval, img_ids, eval_imgs):
+ img_ids, eval_imgs = merge(img_ids, eval_imgs)
+ img_ids = list(img_ids)
+ eval_imgs = list(eval_imgs.flatten())
+
+ coco_eval.evalImgs = eval_imgs
+ coco_eval.params.imgIds = img_ids
+ coco_eval._paramsEval = copy.deepcopy(coco_eval.params)
+
+
+#################################################################
+# From pycocotools, just removed the prints and fixed
+# a Python3 bug about unicode not defined
+#################################################################
+
+
+def evaluate(self):
+ """
+ Run per image evaluation on given images and store results (a list of dict) in self.evalImgs
+ :return: None
+ """
+ # tic = time.time()
+ # print('Running per image evaluation...')
+ p = self.params
+ # add backward compatibility if useSegm is specified in params
+ if p.useSegm is not None:
+ p.iouType = "segm" if p.useSegm == 1 else "bbox"
+ print("useSegm (deprecated) is not None. Running {} evaluation".format(p.iouType))
+ # print('Evaluate annotation type *{}*'.format(p.iouType))
+ p.imgIds = list(np.unique(p.imgIds))
+ if p.useCats:
+ p.catIds = list(np.unique(p.catIds))
+ p.maxDets = sorted(p.maxDets)
+ self.params = p
+
+ self._prepare()
+ # loop through images, area range, max detection number
+ catIds = p.catIds if p.useCats else [-1]
+
+ if p.iouType == "segm" or p.iouType == "bbox":
+ computeIoU = self.computeIoU
+ elif p.iouType == "keypoints":
+ computeIoU = self.computeOks
+ self.ious = {(imgId, catId): computeIoU(imgId, catId) for imgId in p.imgIds for catId in catIds}
+
+ evaluateImg = self.evaluateImg
+ maxDet = p.maxDets[-1]
+ evalImgs = [
+ evaluateImg(imgId, catId, areaRng, maxDet) for catId in catIds for areaRng in p.areaRng for imgId in p.imgIds
+ ]
+ # this is NOT in the pycocotools code, but could be done outside
+ evalImgs = np.asarray(evalImgs).reshape(len(catIds), len(p.areaRng), len(p.imgIds))
+ self._paramsEval = copy.deepcopy(self.params)
+ # toc = time.time()
+ # print('DONE (t={:0.2f}s).'.format(toc-tic))
+ return p.imgIds, evalImgs
+
+
+#################################################################
+# end of straight copy from pycocotools, just removing the prints
+#################################################################
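
A rough usage sketch for `CocoGroundingEvaluator`, mirroring how `demo/test_ap_on_coco.py` drives it; the annotation path is a placeholder and the prediction tensors are fabricated for illustration:

```python
import torch
from pycocotools.coco import COCO

from groundingdino.datasets.cocogrounding_eval import CocoGroundingEvaluator

coco_gt = COCO("annotations/instances_val2017.json")  # placeholder path
evaluator = CocoGroundingEvaluator(coco_gt, iou_types=["bbox"])

# predictions map image_id -> dict of xyxy boxes (absolute pixels), scores, labels
image_id = coco_gt.getImgIds()[0]
predictions = {
    image_id: {
        "boxes": torch.tensor([[10.0, 20.0, 110.0, 220.0]]),
        "scores": torch.tensor([0.9]),
        "labels": torch.tensor([1]),
    }
}

evaluator.update(predictions)
evaluator.synchronize_between_processes()
evaluator.accumulate()
evaluator.summarize()
```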
diff --git a/projects/PCSegSAM2/grounding_dino/groundingdino/datasets/transforms.py b/projects/PCSegSAM2/grounding_dino/groundingdino/datasets/transforms.py
new file mode 100644
index 00000000..39af8403
--- /dev/null
+++ b/projects/PCSegSAM2/grounding_dino/groundingdino/datasets/transforms.py
@@ -0,0 +1,298 @@
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+"""
+Transforms and data augmentation for both image + bbox.
+"""
+import os
+import random
+
+import PIL
+import torch
+import torchvision.transforms as T
+import torchvision.transforms.functional as F
+from groundingdino.util.box_ops import box_xyxy_to_cxcywh
+from groundingdino.util.misc import interpolate
+
+
+def crop(image, target, region):
+ cropped_image = F.crop(image, *region)
+
+ target = target.copy()
+ i, j, h, w = region
+
+ # should we do something wrt the original size?
+ target["size"] = torch.tensor([h, w])
+
+ fields = ["labels", "area", "iscrowd", "positive_map"]
+
+ if "boxes" in target:
+ boxes = target["boxes"]
+ max_size = torch.as_tensor([w, h], dtype=torch.float32)
+ cropped_boxes = boxes - torch.as_tensor([j, i, j, i])
+ cropped_boxes = torch.min(cropped_boxes.reshape(-1, 2, 2), max_size)
+ cropped_boxes = cropped_boxes.clamp(min=0)
+ area = (cropped_boxes[:, 1, :] - cropped_boxes[:, 0, :]).prod(dim=1)
+ target["boxes"] = cropped_boxes.reshape(-1, 4)
+ target["area"] = area
+ fields.append("boxes")
+
+ if "masks" in target:
+ # FIXME should we update the area here if there are no boxes?
+ target["masks"] = target["masks"][:, i : i + h, j : j + w]
+ fields.append("masks")
+
+ # remove elements for which the boxes or masks that have zero area
+ if "boxes" in target or "masks" in target:
+ # favor boxes selection when defining which elements to keep
+ # this is compatible with previous implementation
+ if "boxes" in target:
+ cropped_boxes = target["boxes"].reshape(-1, 2, 2)
+ keep = torch.all(cropped_boxes[:, 1, :] > cropped_boxes[:, 0, :], dim=1)
+ else:
+ keep = target["masks"].flatten(1).any(1)
+
+ for field in fields:
+ if field in target:
+ target[field] = target[field][keep]
+
+ if os.environ.get("IPDB_SHILONG_DEBUG", None) == "INFO":
+ # for debug and visualization only.
+ if "strings_positive" in target:
+ target["strings_positive"] = [_i for _i, _j in zip(target["strings_positive"], keep) if _j]
+
+ return cropped_image, target
+
+
+def hflip(image, target):
+ flipped_image = F.hflip(image)
+
+ w, h = image.size
+
+ target = target.copy()
+ if "boxes" in target:
+ boxes = target["boxes"]
+ boxes = boxes[:, [2, 1, 0, 3]] * torch.as_tensor([-1, 1, -1, 1]) + torch.as_tensor([w, 0, w, 0])
+ target["boxes"] = boxes
+
+ if "masks" in target:
+ target["masks"] = target["masks"].flip(-1)
+
+ return flipped_image, target
+
+
+def resize(image, target, size, max_size=None):
+ # size can be min_size (scalar) or (w, h) tuple
+
+ def get_size_with_aspect_ratio(image_size, size, max_size=None):
+ w, h = image_size
+ if max_size is not None:
+ min_original_size = float(min((w, h)))
+ max_original_size = float(max((w, h)))
+ if max_original_size / min_original_size * size > max_size:
+ size = int(round(max_size * min_original_size / max_original_size))
+
+ if (w <= h and w == size) or (h <= w and h == size):
+ return (h, w)
+
+ if w < h:
+ ow = size
+ oh = int(size * h / w)
+ else:
+ oh = size
+ ow = int(size * w / h)
+
+ return (oh, ow)
+
+ def get_size(image_size, size, max_size=None):
+ if isinstance(size, (list, tuple)):
+ return size[::-1]
+ else:
+ return get_size_with_aspect_ratio(image_size, size, max_size)
+
+ size = get_size(image.size, size, max_size)
+ rescaled_image = F.resize(image, size)
+
+ if target is None:
+ return rescaled_image, None
+
+ ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(rescaled_image.size, image.size))
+ ratio_width, ratio_height = ratios
+
+ target = target.copy()
+ if "boxes" in target:
+ boxes = target["boxes"]
+ scaled_boxes = boxes * torch.as_tensor([ratio_width, ratio_height, ratio_width, ratio_height])
+ target["boxes"] = scaled_boxes
+
+ if "area" in target:
+ area = target["area"]
+ scaled_area = area * (ratio_width * ratio_height)
+ target["area"] = scaled_area
+
+ h, w = size
+ target["size"] = torch.tensor([h, w])
+
+ if "masks" in target:
+ target["masks"] = interpolate(target["masks"][:, None].float(), size, mode="nearest")[:, 0] > 0.5
+
+ return rescaled_image, target
+
+
+def pad(image, target, padding):
+ # assumes that we only pad on the bottom right corners
+ padded_image = F.pad(image, (0, 0, padding[0], padding[1]))
+ if target is None:
+ return padded_image, None
+ target = target.copy()
+ # should we do something wrt the original size?
+ target["size"] = torch.tensor(padded_image.size[::-1])
+ if "masks" in target:
+ target["masks"] = torch.nn.functional.pad(target["masks"], (0, padding[0], 0, padding[1]))
+ return padded_image, target
+
+
+class ResizeDebug(object):
+ def __init__(self, size):
+ self.size = size
+
+ def __call__(self, img, target):
+ return resize(img, target, self.size)
+
+
+class RandomCrop(object):
+ def __init__(self, size):
+ self.size = size
+
+ def __call__(self, img, target):
+ region = T.RandomCrop.get_params(img, self.size)
+ return crop(img, target, region)
+
+
+class RandomSizeCrop(object):
+ def __init__(self, min_size: int, max_size: int, respect_boxes: bool = False):
+ # respect_boxes: True to keep all boxes
+ # False to tolerate boxes being filtered out by the crop
+ self.min_size = min_size
+ self.max_size = max_size
+ self.respect_boxes = respect_boxes
+
+ def __call__(self, img: PIL.Image.Image, target: dict):
+ init_boxes = len(target["boxes"])
+ max_patience = 10
+ for i in range(max_patience):
+ w = random.randint(self.min_size, min(img.width, self.max_size))
+ h = random.randint(self.min_size, min(img.height, self.max_size))
+ region = T.RandomCrop.get_params(img, [h, w])
+ result_img, result_target = crop(img, target, region)
+ if not self.respect_boxes or len(result_target["boxes"]) == init_boxes or i == max_patience - 1:
+ return result_img, result_target
+ return result_img, result_target
+
+
+class CenterCrop(object):
+ def __init__(self, size):
+ self.size = size
+
+ def __call__(self, img, target):
+ image_width, image_height = img.size
+ crop_height, crop_width = self.size
+ crop_top = int(round((image_height - crop_height) / 2.0))
+ crop_left = int(round((image_width - crop_width) / 2.0))
+ return crop(img, target, (crop_top, crop_left, crop_height, crop_width))
+
+
+class RandomHorizontalFlip(object):
+ def __init__(self, p=0.5):
+ self.p = p
+
+ def __call__(self, img, target):
+ if random.random() < self.p:
+ return hflip(img, target)
+ return img, target
+
+
+class RandomResize(object):
+ def __init__(self, sizes, max_size=None):
+ assert isinstance(sizes, (list, tuple))
+ self.sizes = sizes
+ self.max_size = max_size
+
+ def __call__(self, img, target=None):
+ size = random.choice(self.sizes)
+ return resize(img, target, size, self.max_size)
+
+
+class RandomPad(object):
+ def __init__(self, max_pad):
+ self.max_pad = max_pad
+
+ def __call__(self, img, target):
+ pad_x = random.randint(0, self.max_pad)
+ pad_y = random.randint(0, self.max_pad)
+ return pad(img, target, (pad_x, pad_y))
+
+
+class RandomSelect(object):
+ """
+ Randomly selects between transforms1 and transforms2,
+ with probability p for transforms1 and (1 - p) for transforms2
+ """
+
+ def __init__(self, transforms1, transforms2, p=0.5):
+ self.transforms1 = transforms1
+ self.transforms2 = transforms2
+ self.p = p
+
+ def __call__(self, img, target):
+ if random.random() < self.p:
+ return self.transforms1(img, target)
+ return self.transforms2(img, target)
+
+
+class ToTensor(object):
+ def __call__(self, img, target):
+ return F.to_tensor(img), target
+
+
+class RandomErasing(object):
+ def __init__(self, *args, **kwargs):
+ self.eraser = T.RandomErasing(*args, **kwargs)
+
+ def __call__(self, img, target):
+ return self.eraser(img), target
+
+
+class Normalize(object):
+ def __init__(self, mean, std):
+ self.mean = mean
+ self.std = std
+
+ def __call__(self, image, target=None):
+ image = F.normalize(image, mean=self.mean, std=self.std)
+ if target is None:
+ return image, None
+ target = target.copy()
+ h, w = image.shape[-2:]
+ if "boxes" in target:
+ boxes = target["boxes"]
+ boxes = box_xyxy_to_cxcywh(boxes)
+ boxes = boxes / torch.tensor([w, h, w, h], dtype=torch.float32)
+ target["boxes"] = boxes
+ return image, target
+
+
+class Compose(object):
+ def __init__(self, transforms):
+ self.transforms = transforms
+
+ def __call__(self, image, target):
+ for t in self.transforms:
+ image, target = t(image, target)
+ return image, target
+
+ def __repr__(self):
+ format_string = self.__class__.__name__ + "("
+ for t in self.transforms:
+ format_string += "\n"
+ format_string += " {0}".format(t)
+ format_string += "\n)"
+ return format_string
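
For reference, inference-time preprocessing elsewhere in the repository composes these transforms roughly as follows (mean/std are the standard ImageNet statistics; the image path is a placeholder):

```python
from PIL import Image

import groundingdino.datasets.transforms as T

transform = T.Compose(
    [
        T.RandomResize([800], max_size=1333),  # a single candidate size, so deterministic
        T.ToTensor(),
        T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ]
)

image_source = Image.open("image.jpg").convert("RGB")
image, _ = transform(image_source, None)  # target=None at inference time
```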
diff --git a/projects/PCSegSAM2/grounding_dino/groundingdino/models/GroundingDINO/__init__.py b/projects/PCSegSAM2/grounding_dino/groundingdino/models/GroundingDINO/__init__.py
new file mode 100644
index 00000000..2af819d6
--- /dev/null
+++ b/projects/PCSegSAM2/grounding_dino/groundingdino/models/GroundingDINO/__init__.py
@@ -0,0 +1,15 @@
+# ------------------------------------------------------------------------
+# Grounding DINO
+# url: https://github.com/IDEA-Research/GroundingDINO
+# Copyright (c) 2023 IDEA. All Rights Reserved.
+# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
+# ------------------------------------------------------------------------
+# Conditional DETR
+# Copyright (c) 2021 Microsoft. All Rights Reserved.
+# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
+# ------------------------------------------------------------------------
+# Copied from DETR (https://github.com/facebookresearch/detr)
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
+# ------------------------------------------------------------------------
+
+from .groundingdino import build_groundingdino
diff --git a/projects/PCSegSAM2/grounding_dino/groundingdino/models/GroundingDINO/backbone/__init__.py b/projects/PCSegSAM2/grounding_dino/groundingdino/models/GroundingDINO/backbone/__init__.py
new file mode 100644
index 00000000..76e4b272
--- /dev/null
+++ b/projects/PCSegSAM2/grounding_dino/groundingdino/models/GroundingDINO/backbone/__init__.py
@@ -0,0 +1 @@
+from .backbone import build_backbone
diff --git a/projects/PCSegSAM2/grounding_dino/groundingdino/models/GroundingDINO/backbone/backbone.py b/projects/PCSegSAM2/grounding_dino/groundingdino/models/GroundingDINO/backbone/backbone.py
new file mode 100644
index 00000000..3cd8702a
--- /dev/null
+++ b/projects/PCSegSAM2/grounding_dino/groundingdino/models/GroundingDINO/backbone/backbone.py
@@ -0,0 +1,213 @@
+# ------------------------------------------------------------------------
+# Grounding DINO
+# url: https://github.com/IDEA-Research/GroundingDINO
+# Copyright (c) 2023 IDEA. All Rights Reserved.
+# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
+# ------------------------------------------------------------------------
+# Conditional DETR
+# Copyright (c) 2021 Microsoft. All Rights Reserved.
+# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
+# ------------------------------------------------------------------------
+# Copied from DETR (https://github.com/facebookresearch/detr)
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
+# ------------------------------------------------------------------------
+
+"""
+Backbone modules.
+"""
+
+from typing import Dict, List
+
+import torch
+import torch.nn.functional as F
+import torchvision
+from groundingdino.util.misc import NestedTensor, clean_state_dict, is_main_process
+from torch import nn
+from torchvision.models._utils import IntermediateLayerGetter
+
+from .position_encoding import build_position_encoding
+from .swin_transformer import build_swin_transformer
+
+
+class FrozenBatchNorm2d(torch.nn.Module):
+ """
+ BatchNorm2d where the batch statistics and the affine parameters are fixed.
+
+ Copy-paste from torchvision.misc.ops with added eps before rsqrt,
+ without which any other models than torchvision.models.resnet[18,34,50,101]
+ produce nans.
+ """
+
+ def __init__(self, n):
+ super(FrozenBatchNorm2d, self).__init__()
+ self.register_buffer("weight", torch.ones(n))
+ self.register_buffer("bias", torch.zeros(n))
+ self.register_buffer("running_mean", torch.zeros(n))
+ self.register_buffer("running_var", torch.ones(n))
+
+ def _load_from_state_dict(
+ self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
+ ):
+ num_batches_tracked_key = prefix + "num_batches_tracked"
+ if num_batches_tracked_key in state_dict:
+ del state_dict[num_batches_tracked_key]
+
+ super(FrozenBatchNorm2d, self)._load_from_state_dict(
+ state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
+ )
+
+ def forward(self, x):
+ # move reshapes to the beginning
+ # to make it fuser-friendly
+ w = self.weight.reshape(1, -1, 1, 1)
+ b = self.bias.reshape(1, -1, 1, 1)
+ rv = self.running_var.reshape(1, -1, 1, 1)
+ rm = self.running_mean.reshape(1, -1, 1, 1)
+ eps = 1e-5
+ scale = w * (rv + eps).rsqrt()
+ bias = b - rm * scale
+ return x * scale + bias
+
+
+class BackboneBase(nn.Module):
+ def __init__(
+ self,
+ backbone: nn.Module,
+ train_backbone: bool,
+ num_channels: int,
+ return_interm_indices: list,
+ ):
+ super().__init__()
+ for name, parameter in backbone.named_parameters():
+ if not train_backbone or "layer2" not in name and "layer3" not in name and "layer4" not in name:
+ parameter.requires_grad_(False)
+
+ return_layers = {}
+ for idx, layer_index in enumerate(return_interm_indices):
+ return_layers.update({"layer{}".format(5 - len(return_interm_indices) + idx): "{}".format(layer_index)})
+
+ # if len:
+ # if use_stage1_feature:
+ # return_layers = {"layer1": "0", "layer2": "1", "layer3": "2", "layer4": "3"}
+ # else:
+ # return_layers = {"layer2": "0", "layer3": "1", "layer4": "2"}
+ # else:
+ # return_layers = {'layer4': "0"}
+ self.body = IntermediateLayerGetter(backbone, return_layers=return_layers)
+ self.num_channels = num_channels
+
+ def forward(self, tensor_list: NestedTensor):
+ xs = self.body(tensor_list.tensors)
+ out: Dict[str, NestedTensor] = {}
+ for name, x in xs.items():
+ m = tensor_list.mask
+ assert m is not None
+ mask = F.interpolate(m[None].float(), size=x.shape[-2:]).to(torch.bool)[0]
+ out[name] = NestedTensor(x, mask)
+ # import ipdb; ipdb.set_trace()
+ return out
+
+
+class Backbone(BackboneBase):
+ """ResNet backbone with frozen BatchNorm."""
+
+ def __init__(
+ self,
+ name: str,
+ train_backbone: bool,
+ dilation: bool,
+ return_interm_indices: list,
+ batch_norm=FrozenBatchNorm2d,
+ ):
+ if name in ["resnet18", "resnet34", "resnet50", "resnet101"]:
+ backbone = getattr(torchvision.models, name)(
+ replace_stride_with_dilation=[False, False, dilation],
+ pretrained=is_main_process(),
+ norm_layer=batch_norm,
+ )
+ else:
+ raise NotImplementedError("Why you can get here with name {}".format(name))
+ # num_channels = 512 if name in ('resnet18', 'resnet34') else 2048
+ assert name not in ("resnet18", "resnet34"), "Only resnet50 and resnet101 are available."
+ assert return_interm_indices in [[0, 1, 2, 3], [1, 2, 3], [3]]
+ num_channels_all = [256, 512, 1024, 2048]
+ num_channels = num_channels_all[4 - len(return_interm_indices) :]
+ super().__init__(backbone, train_backbone, num_channels, return_interm_indices)
+
+
+class Joiner(nn.Sequential):
+ def __init__(self, backbone, position_embedding):
+ super().__init__(backbone, position_embedding)
+
+ def forward(self, tensor_list: NestedTensor):
+ xs = self[0](tensor_list)
+ out: List[NestedTensor] = []
+ pos = []
+ for name, x in xs.items():
+ out.append(x)
+ # position encoding
+ pos.append(self[1](x).to(x.tensors.dtype))
+
+ return out, pos
+
+
+def build_backbone(args):
+ """
+ Useful args:
+ - backbone: backbone name
+ - lr_backbone:
+ - dilation
+ - return_interm_indices: available: [0,1,2,3], [1,2,3], [3]
+ - backbone_freeze_keywords:
+ - use_checkpoint: for swin only for now
+
+ """
+ position_embedding = build_position_encoding(args)
+ train_backbone = True
+ if not train_backbone:
+ raise ValueError("Please set lr_backbone > 0")
+ return_interm_indices = args.return_interm_indices
+ assert return_interm_indices in [[0, 1, 2, 3], [1, 2, 3], [3]]
+ args.backbone_freeze_keywords
+ use_checkpoint = getattr(args, "use_checkpoint", False)
+
+ if args.backbone in ["resnet50", "resnet101"]:
+ backbone = Backbone(
+ args.backbone,
+ train_backbone,
+ args.dilation,
+ return_interm_indices,
+ batch_norm=FrozenBatchNorm2d,
+ )
+ bb_num_channels = backbone.num_channels
+ elif args.backbone in [
+ "swin_T_224_1k",
+ "swin_B_224_22k",
+ "swin_B_384_22k",
+ "swin_L_224_22k",
+ "swin_L_384_22k",
+ ]:
+ pretrain_img_size = int(args.backbone.split("_")[-2])
+ backbone = build_swin_transformer(
+ args.backbone,
+ pretrain_img_size=pretrain_img_size,
+ out_indices=tuple(return_interm_indices),
+ dilation=False,
+ use_checkpoint=use_checkpoint,
+ )
+
+ bb_num_channels = backbone.num_features[4 - len(return_interm_indices) :]
+ else:
+ raise NotImplementedError("Unknown backbone {}".format(args.backbone))
+
+ assert len(bb_num_channels) == len(
+ return_interm_indices
+ ), f"len(bb_num_channels) {len(bb_num_channels)} != len(return_interm_indices) {len(return_interm_indices)}"
+
+ model = Joiner(backbone, position_embedding)
+ model.num_channels = bb_num_channels
+ assert isinstance(bb_num_channels, List), "bb_num_channels is expected to be a List but {}".format(
+ type(bb_num_channels)
+ )
+ # import ipdb; ipdb.set_trace()
+ return model
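
As a sanity check on `FrozenBatchNorm2d` above: with identical statistics and affine parameters it should match an eval-mode `nn.BatchNorm2d`, since both use eps=1e-5. A small self-contained sketch:

```python
import torch
from torch import nn

from groundingdino.models.GroundingDINO.backbone.backbone import FrozenBatchNorm2d

bn = nn.BatchNorm2d(8).eval()
bn.weight.data.uniform_(0.5, 1.5)
bn.bias.data.uniform_(-0.5, 0.5)
bn.running_mean.uniform_(-1.0, 1.0)
bn.running_var.uniform_(0.5, 2.0)

# copy statistics and affine parameters into the frozen variant
frozen = FrozenBatchNorm2d(8)
frozen.weight.copy_(bn.weight.data)
frozen.bias.copy_(bn.bias.data)
frozen.running_mean.copy_(bn.running_mean)
frozen.running_var.copy_(bn.running_var)

x = torch.randn(2, 8, 4, 4)
print(torch.allclose(frozen(x), bn(x), atol=1e-5))  # expected: True
```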
diff --git a/projects/PCSegSAM2/grounding_dino/groundingdino/models/GroundingDINO/backbone/position_encoding.py b/projects/PCSegSAM2/grounding_dino/groundingdino/models/GroundingDINO/backbone/position_encoding.py
new file mode 100644
index 00000000..25a91d0e
--- /dev/null
+++ b/projects/PCSegSAM2/grounding_dino/groundingdino/models/GroundingDINO/backbone/position_encoding.py
@@ -0,0 +1,175 @@
+# ------------------------------------------------------------------------
+# Grounding DINO
+# url: https://github.com/IDEA-Research/GroundingDINO
+# Copyright (c) 2023 IDEA. All Rights Reserved.
+# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
+# ------------------------------------------------------------------------
+# DINO
+# Copyright (c) 2022 IDEA. All Rights Reserved.
+# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
+# ------------------------------------------------------------------------
+# Conditional DETR
+# Copyright (c) 2021 Microsoft. All Rights Reserved.
+# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
+# ------------------------------------------------------------------------
+# Copied from DETR (https://github.com/facebookresearch/detr)
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
+# ------------------------------------------------------------------------
+
+"""
+Various positional encodings for the transformer.
+"""
+import math
+
+import torch
+from groundingdino.util.misc import NestedTensor
+from torch import nn
+
+
+class PositionEmbeddingSine(nn.Module):
+ """
+ This is a more standard version of the position embedding, very similar to the one
+ used by the Attention is all you need paper, generalized to work on images.
+ """
+
+ def __init__(self, num_pos_feats=64, temperature=10000, normalize=False, scale=None):
+ super().__init__()
+ self.num_pos_feats = num_pos_feats
+ self.temperature = temperature
+ self.normalize = normalize
+ if scale is not None and normalize is False:
+ raise ValueError("normalize should be True if scale is passed")
+ if scale is None:
+ scale = 2 * math.pi
+ self.scale = scale
+
+ def forward(self, tensor_list: NestedTensor):
+ x = tensor_list.tensors
+ mask = tensor_list.mask
+ assert mask is not None
+ not_mask = ~mask
+ y_embed = not_mask.cumsum(1, dtype=torch.float32)
+ x_embed = not_mask.cumsum(2, dtype=torch.float32)
+ if self.normalize:
+ eps = 1e-6
+ # if os.environ.get("SHILONG_AMP", None) == '1':
+ # eps = 1e-4
+ # else:
+ # eps = 1e-6
+ y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale
+ x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale
+
+ dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device)
+ dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats)
+
+ pos_x = x_embed[:, :, :, None] / dim_t
+ pos_y = y_embed[:, :, :, None] / dim_t
+ pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3)
+ pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3)
+ pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
+ return pos
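+
+# Shape note (illustrative example, values assumed): for a NestedTensor with
+# tensors (B, C, H, W) and mask (B, H, W), the embedding returns pos of shape
+# (B, 2*num_pos_feats, H, W); the DETR-style num_pos_feats=128 yields 256 channels.
+#   pe = PositionEmbeddingSine(num_pos_feats=128, normalize=True)
+#   nt = NestedTensor(torch.rand(2, 256, 32, 32), torch.zeros(2, 32, 32, dtype=torch.bool))
+#   pe(nt).shape  # torch.Size([2, 256, 32, 32])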
+
+
+class PositionEmbeddingSineHW(nn.Module):
+ """
+ This is a more standard version of the position embedding, very similar to the one
+ used by the Attention is all you need paper, generalized to work on images.
+ """
+
+ def __init__(self, num_pos_feats=64, temperatureH=10000, temperatureW=10000, normalize=False, scale=None):
+ super().__init__()
+ self.num_pos_feats = num_pos_feats
+ self.temperatureH = temperatureH
+ self.temperatureW = temperatureW
+ self.normalize = normalize
+ if scale is not None and normalize is False:
+ raise ValueError("normalize should be True if scale is passed")
+ if scale is None:
+ scale = 2 * math.pi
+ self.scale = scale
+
+ def forward(self, tensor_list: NestedTensor):
+ x = tensor_list.tensors
+ mask = tensor_list.mask
+ assert mask is not None
+ not_mask = ~mask
+ y_embed = not_mask.cumsum(1, dtype=torch.float32)
+ x_embed = not_mask.cumsum(2, dtype=torch.float32)
+
+ if self.normalize:
+ eps = 1e-6
+ y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale
+ x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale
+
+ dim_tx = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device)
+ dim_tx = self.temperatureW ** (2 * (torch.div(dim_tx, 2, rounding_mode="floor")) / self.num_pos_feats)
+ pos_x = x_embed[:, :, :, None] / dim_tx
+
+ dim_ty = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device)
+ dim_ty = self.temperatureH ** (2 * (torch.div(dim_ty, 2, rounding_mode="floor")) / self.num_pos_feats)
+ pos_y = y_embed[:, :, :, None] / dim_ty
+
+ pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3)
+ pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3)
+ pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
+
+ return pos
+
+
+class PositionEmbeddingLearned(nn.Module):
+ """
+ Absolute pos embedding, learned.
+ """
+
+ def __init__(self, num_pos_feats=256):
+ super().__init__()
+ self.row_embed = nn.Embedding(50, num_pos_feats)
+ self.col_embed = nn.Embedding(50, num_pos_feats)
+ self.reset_parameters()
+
+ def reset_parameters(self):
+ nn.init.uniform_(self.row_embed.weight)
+ nn.init.uniform_(self.col_embed.weight)
+
+ def forward(self, tensor_list: NestedTensor):
+ x = tensor_list.tensors
+ h, w = x.shape[-2:]
+ i = torch.arange(w, device=x.device)
+ j = torch.arange(h, device=x.device)
+ x_emb = self.col_embed(i)
+ y_emb = self.row_embed(j)
+ pos = (
+ torch.cat(
+ [
+ x_emb.unsqueeze(0).repeat(h, 1, 1),
+ y_emb.unsqueeze(1).repeat(1, w, 1),
+ ],
+ dim=-1,
+ )
+ .permute(2, 0, 1)
+ .unsqueeze(0)
+ .repeat(x.shape[0], 1, 1, 1)
+ )
+ return pos
+
+
+def build_position_encoding(args):
+ N_steps = args.hidden_dim // 2
+ if args.position_embedding in ("v2", "sine"):
+ # TODO find a better way of exposing other arguments
+ position_embedding = PositionEmbeddingSineHW(
+ N_steps,
+ temperatureH=args.pe_temperatureH,
+ temperatureW=args.pe_temperatureW,
+ normalize=True,
+ )
+ elif args.position_embedding in ("v3", "learned"):
+ position_embedding = PositionEmbeddingLearned(N_steps)
+ else:
+ raise ValueError(f"not supported {args.position_embedding}")
+
+ return position_embedding
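+
+# Usage sketch (illustrative; the attribute values are assumptions, not repo defaults):
+#   from types import SimpleNamespace
+#   args = SimpleNamespace(hidden_dim=256, position_embedding="sine",
+#                          pe_temperatureH=20, pe_temperatureW=20)
+#   pe = build_position_encoding(args)  # PositionEmbeddingSineHW; 2*N_steps = 256 channels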
diff --git a/projects/PCSegSAM2/grounding_dino/groundingdino/models/GroundingDINO/backbone/swin_transformer.py b/projects/PCSegSAM2/grounding_dino/groundingdino/models/GroundingDINO/backbone/swin_transformer.py
new file mode 100644
index 00000000..683779d8
--- /dev/null
+++ b/projects/PCSegSAM2/grounding_dino/groundingdino/models/GroundingDINO/backbone/swin_transformer.py
@@ -0,0 +1,765 @@
+# ------------------------------------------------------------------------
+# Grounding DINO
+# url: https://github.com/IDEA-Research/GroundingDINO
+# Copyright (c) 2023 IDEA. All Rights Reserved.
+# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
+# ------------------------------------------------------------------------
+# DINO
+# Copyright (c) 2022 IDEA. All Rights Reserved.
+# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
+# --------------------------------------------------------
+# modified from https://github.com/SwinTransformer/Swin-Transformer-Object-Detection/blob/master/mmdet/models/backbones/swin_transformer.py
+# --------------------------------------------------------
+
+import numpy as np
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import torch.utils.checkpoint as checkpoint
+from groundingdino.util.misc import NestedTensor
+from timm.models.layers import DropPath, to_2tuple, trunc_normal_
+
+
+class Mlp(nn.Module):
+ """Multilayer perceptron."""
+
+ def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.0):
+ super().__init__()
+ out_features = out_features or in_features
+ hidden_features = hidden_features or in_features
+ self.fc1 = nn.Linear(in_features, hidden_features)
+ self.act = act_layer()
+ self.fc2 = nn.Linear(hidden_features, out_features)
+ self.drop = nn.Dropout(drop)
+
+ def forward(self, x):
+ x = self.fc1(x)
+ x = self.act(x)
+ x = self.drop(x)
+ x = self.fc2(x)
+ x = self.drop(x)
+ return x
+
+
+def window_partition(x, window_size):
+ """
+ Args:
+ x: (B, H, W, C)
+ window_size (int): window size
+ Returns:
+ windows: (num_windows*B, window_size, window_size, C)
+ """
+ B, H, W, C = x.shape
+ x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
+ windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
+ return windows
+
+
+def window_reverse(windows, window_size, H, W):
+ """
+ Args:
+ windows: (num_windows*B, window_size, window_size, C)
+ window_size (int): Window size
+ H (int): Height of image
+ W (int): Width of image
+ Returns:
+ x: (B, H, W, C)
+ """
+ B = int(windows.shape[0] / (H * W / window_size / window_size))
+ x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
+ x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
+ return x
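+
+# Round-trip example (illustrative): window_partition and window_reverse are
+# inverses when H and W are multiples of window_size.
+#   x = torch.rand(2, 14, 14, 96)
+#   w = window_partition(x, 7)                    # (2*2*2, 7, 7, 96) == (8, 7, 7, 96)
+#   torch.equal(window_reverse(w, 7, 14, 14), x)  # True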
+
+
+class WindowAttention(nn.Module):
+ """Window based multi-head self attention (W-MSA) module with relative position bias.
+ It supports both shifted and non-shifted windows.
+ Args:
+ dim (int): Number of input channels.
+ window_size (tuple[int]): The height and width of the window.
+ num_heads (int): Number of attention heads.
+ qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
+ qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set
+ attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
+ proj_drop (float, optional): Dropout ratio of output. Default: 0.0
+ """
+
+ def __init__(
+ self,
+ dim,
+ window_size,
+ num_heads,
+ qkv_bias=True,
+ qk_scale=None,
+ attn_drop=0.0,
+ proj_drop=0.0,
+ ):
+
+ super().__init__()
+ self.dim = dim
+ self.window_size = window_size # Wh, Ww
+ self.num_heads = num_heads
+ head_dim = dim // num_heads
+ self.scale = qk_scale or head_dim**-0.5
+
+ # define a parameter table of relative position bias
+ self.relative_position_bias_table = nn.Parameter(
+ torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)
+ ) # 2*Wh-1 * 2*Ww-1, nH
+
+ # get pair-wise relative position index for each token inside the window
+ coords_h = torch.arange(self.window_size[0])
+ coords_w = torch.arange(self.window_size[1])
+ coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
+ coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
+ relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
+ relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
+ relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0
+ relative_coords[:, :, 1] += self.window_size[1] - 1
+ relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
+ relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
+ self.register_buffer("relative_position_index", relative_position_index)
+
+ self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
+ self.attn_drop = nn.Dropout(attn_drop)
+ self.proj = nn.Linear(dim, dim)
+ self.proj_drop = nn.Dropout(proj_drop)
+
+ trunc_normal_(self.relative_position_bias_table, std=0.02)
+ self.softmax = nn.Softmax(dim=-1)
+
+ def forward(self, x, mask=None):
+ """Forward function.
+ Args:
+ x: input features with shape of (num_windows*B, N, C)
+ mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
+ """
+ B_, N, C = x.shape
+ qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
+ q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
+
+ q = q * self.scale
+ attn = q @ k.transpose(-2, -1)
+
+ relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
+ self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1
+ ) # Wh*Ww,Wh*Ww,nH
+ relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
+ attn = attn + relative_position_bias.unsqueeze(0)
+
+ if mask is not None:
+ nW = mask.shape[0]
+ attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
+ attn = attn.view(-1, self.num_heads, N, N)
+ attn = self.softmax(attn)
+ else:
+ attn = self.softmax(attn)
+
+ attn = self.attn_drop(attn)
+
+ x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
+ x = self.proj(x)
+ x = self.proj_drop(x)
+ return x
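+
+# Indexing note (illustrative): for a 2x2 window the bias table has
+# (2*2-1)*(2*2-1) = 9 rows per head, and a token pair with offset (dy, dx),
+# dy, dx in {-1, 0, 1}, looks up row (dy + 1) * 3 + (dx + 1), so every relative
+# offset shares one learned bias across all window positions.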
+
+
+class SwinTransformerBlock(nn.Module):
+ """Swin Transformer Block.
+ Args:
+ dim (int): Number of input channels.
+ num_heads (int): Number of attention heads.
+ window_size (int): Window size.
+ shift_size (int): Shift size for SW-MSA.
+ mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
+ qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
+ qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
+ drop (float, optional): Dropout rate. Default: 0.0
+ attn_drop (float, optional): Attention dropout rate. Default: 0.0
+ drop_path (float, optional): Stochastic depth rate. Default: 0.0
+ act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
+ norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
+ """
+
+ def __init__(
+ self,
+ dim,
+ num_heads,
+ window_size=7,
+ shift_size=0,
+ mlp_ratio=4.0,
+ qkv_bias=True,
+ qk_scale=None,
+ drop=0.0,
+ attn_drop=0.0,
+ drop_path=0.0,
+ act_layer=nn.GELU,
+ norm_layer=nn.LayerNorm,
+ ):
+ super().__init__()
+ self.dim = dim
+ self.num_heads = num_heads
+ self.window_size = window_size
+ self.shift_size = shift_size
+ self.mlp_ratio = mlp_ratio
+ assert 0 <= self.shift_size < self.window_size, "shift_size must in 0-window_size"
+
+ self.norm1 = norm_layer(dim)
+ self.attn = WindowAttention(
+ dim,
+ window_size=to_2tuple(self.window_size),
+ num_heads=num_heads,
+ qkv_bias=qkv_bias,
+ qk_scale=qk_scale,
+ attn_drop=attn_drop,
+ proj_drop=drop,
+ )
+
+ self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
+ self.norm2 = norm_layer(dim)
+ mlp_hidden_dim = int(dim * mlp_ratio)
+ self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
+
+ self.H = None
+ self.W = None
+
+ def forward(self, x, mask_matrix):
+ """Forward function.
+ Args:
+ x: Input feature, tensor size (B, H*W, C).
+ H, W: Spatial resolution of the input feature.
+ mask_matrix: Attention mask for cyclic shift.
+ """
+ B, L, C = x.shape
+ H, W = self.H, self.W
+ assert L == H * W, "input feature has wrong size"
+
+ shortcut = x
+ x = self.norm1(x)
+ x = x.view(B, H, W, C)
+
+ # pad feature maps to multiples of window size
+ pad_l = pad_t = 0
+ pad_r = (self.window_size - W % self.window_size) % self.window_size
+ pad_b = (self.window_size - H % self.window_size) % self.window_size
+ x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b))
+ _, Hp, Wp, _ = x.shape
+
+ # cyclic shift
+ if self.shift_size > 0:
+ shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
+ attn_mask = mask_matrix
+ else:
+ shifted_x = x
+ attn_mask = None
+
+ # partition windows
+ x_windows = window_partition(shifted_x, self.window_size) # nW*B, window_size, window_size, C
+ x_windows = x_windows.view(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C
+
+ # W-MSA/SW-MSA
+ attn_windows = self.attn(x_windows, mask=attn_mask) # nW*B, window_size*window_size, C
+
+ # merge windows
+ attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
+ shifted_x = window_reverse(attn_windows, self.window_size, Hp, Wp) # B H' W' C
+
+ # reverse cyclic shift
+ if self.shift_size > 0:
+ x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
+ else:
+ x = shifted_x
+
+ if pad_r > 0 or pad_b > 0:
+ x = x[:, :H, :W, :].contiguous()
+
+ x = x.view(B, H * W, C)
+
+ # FFN
+ x = shortcut + self.drop_path(x)
+ x = x + self.drop_path(self.mlp(self.norm2(x)))
+
+ return x
+
+
+class PatchMerging(nn.Module):
+ """Patch Merging Layer
+ Args:
+ dim (int): Number of input channels.
+ norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
+ """
+
+ def __init__(self, dim, norm_layer=nn.LayerNorm):
+ super().__init__()
+ self.dim = dim
+ self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
+ self.norm = norm_layer(4 * dim)
+
+ def forward(self, x, H, W):
+ """Forward function.
+ Args:
+ x: Input feature, tensor size (B, H*W, C).
+ H, W: Spatial resolution of the input feature.
+ """
+ B, L, C = x.shape
+ assert L == H * W, "input feature has wrong size"
+
+ x = x.view(B, H, W, C)
+
+ # padding
+ pad_input = (H % 2 == 1) or (W % 2 == 1)
+ if pad_input:
+ x = F.pad(x, (0, 0, 0, W % 2, 0, H % 2))
+
+ x0 = x[:, 0::2, 0::2, :] # B H/2 W/2 C
+ x1 = x[:, 1::2, 0::2, :] # B H/2 W/2 C
+ x2 = x[:, 0::2, 1::2, :] # B H/2 W/2 C
+ x3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C
+ x = torch.cat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C
+ x = x.view(B, -1, 4 * C) # B H/2*W/2 4*C
+
+ x = self.norm(x)
+ x = self.reduction(x)
+
+ return x
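+
+# Shape note (illustrative): merging concatenates each 2x2 patch neighborhood and
+# projects 4*C -> 2*C, e.g. (B, 56*56, 96) -> (B, 28*28, 192) for a Swin-T stage.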
+
+
+class BasicLayer(nn.Module):
+ """A basic Swin Transformer layer for one stage.
+ Args:
+ dim (int): Number of feature channels
+ depth (int): Number of blocks in this stage.
+ num_heads (int): Number of attention heads.
+ window_size (int): Local window size. Default: 7.
+ mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4.
+ qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
+ qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
+ drop (float, optional): Dropout rate. Default: 0.0
+ attn_drop (float, optional): Attention dropout rate. Default: 0.0
+ drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
+ norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
+ downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
+ use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
+ """
+
+ def __init__(
+ self,
+ dim,
+ depth,
+ num_heads,
+ window_size=7,
+ mlp_ratio=4.0,
+ qkv_bias=True,
+ qk_scale=None,
+ drop=0.0,
+ attn_drop=0.0,
+ drop_path=0.0,
+ norm_layer=nn.LayerNorm,
+ downsample=None,
+ use_checkpoint=False,
+ ):
+ super().__init__()
+ self.window_size = window_size
+ self.shift_size = window_size // 2
+ self.depth = depth
+ self.use_checkpoint = use_checkpoint
+
+ # build blocks
+ self.blocks = nn.ModuleList(
+ [
+ SwinTransformerBlock(
+ dim=dim,
+ num_heads=num_heads,
+ window_size=window_size,
+ shift_size=0 if (i % 2 == 0) else window_size // 2,
+ mlp_ratio=mlp_ratio,
+ qkv_bias=qkv_bias,
+ qk_scale=qk_scale,
+ drop=drop,
+ attn_drop=attn_drop,
+ drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
+ norm_layer=norm_layer,
+ )
+ for i in range(depth)
+ ]
+ )
+
+ # patch merging layer
+ if downsample is not None:
+ self.downsample = downsample(dim=dim, norm_layer=norm_layer)
+ else:
+ self.downsample = None
+
+ def forward(self, x, H, W):
+ """Forward function.
+ Args:
+ x: Input feature, tensor size (B, H*W, C).
+ H, W: Spatial resolution of the input feature.
+ """
+
+ # calculate attention mask for SW-MSA
+ Hp = int(np.ceil(H / self.window_size)) * self.window_size
+ Wp = int(np.ceil(W / self.window_size)) * self.window_size
+ img_mask = torch.zeros((1, Hp, Wp, 1), device=x.device) # 1 Hp Wp 1
+ h_slices = (
+ slice(0, -self.window_size),
+ slice(-self.window_size, -self.shift_size),
+ slice(-self.shift_size, None),
+ )
+ w_slices = (
+ slice(0, -self.window_size),
+ slice(-self.window_size, -self.shift_size),
+ slice(-self.shift_size, None),
+ )
+ cnt = 0
+ for h in h_slices:
+ for w in w_slices:
+ img_mask[:, h, w, :] = cnt
+ cnt += 1
+
+ mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1
+ mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
+ attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
+ attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
+
+ for blk in self.blocks:
+ blk.H, blk.W = H, W
+ if self.use_checkpoint:
+ x = checkpoint.checkpoint(blk, x, attn_mask)
+ else:
+ x = blk(x, attn_mask)
+ if self.downsample is not None:
+ x_down = self.downsample(x, H, W)
+ Wh, Ww = (H + 1) // 2, (W + 1) // 2
+ return x, H, W, x_down, Wh, Ww
+ else:
+ return x, H, W, x, H, W
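+
+# Mask note (illustrative): the SW-MSA mask above labels up to 3x3 = 9 regions of
+# the (padded) feature map; token pairs inside the same region get 0, pairs across
+# regions get -100, which effectively zeroes their attention weight after softmax.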
+
+
+class PatchEmbed(nn.Module):
+ """Image to Patch Embedding
+ Args:
+ patch_size (int): Patch token size. Default: 4.
+ in_chans (int): Number of input image channels. Default: 3.
+ embed_dim (int): Number of linear projection output channels. Default: 96.
+ norm_layer (nn.Module, optional): Normalization layer. Default: None
+ """
+
+ def __init__(self, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None):
+ super().__init__()
+ patch_size = to_2tuple(patch_size)
+ self.patch_size = patch_size
+
+ self.in_chans = in_chans
+ self.embed_dim = embed_dim
+
+ self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
+ if norm_layer is not None:
+ self.norm = norm_layer(embed_dim)
+ else:
+ self.norm = None
+
+ def forward(self, x):
+ """Forward function."""
+ # padding
+ _, _, H, W = x.size()
+ if W % self.patch_size[1] != 0:
+ x = F.pad(x, (0, self.patch_size[1] - W % self.patch_size[1]))
+ if H % self.patch_size[0] != 0:
+ x = F.pad(x, (0, 0, 0, self.patch_size[0] - H % self.patch_size[0]))
+
+ x = self.proj(x) # B C Wh Ww
+ if self.norm is not None:
+ Wh, Ww = x.size(2), x.size(3)
+ x = x.flatten(2).transpose(1, 2)
+ x = self.norm(x)
+ x = x.transpose(1, 2).view(-1, self.embed_dim, Wh, Ww)
+
+ return x
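+
+# Shape note (illustrative): with patch_size=4 and embed_dim=96, a (B, 3, 224, 224)
+# image becomes a (B, 96, 56, 56) feature map (inputs are padded up to a multiple
+# of the patch size first).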
+
+
+class SwinTransformer(nn.Module):
+ """Swin Transformer backbone.
+ A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` -
+ https://arxiv.org/pdf/2103.14030
+ Args:
+ pretrain_img_size (int): Input image size for training the pretrained model,
+ used in absolute position embedding. Default 224.
+ patch_size (int | tuple(int)): Patch size. Default: 4.
+ in_chans (int): Number of input image channels. Default: 3.
+ embed_dim (int): Number of linear projection output channels. Default: 96.
+ depths (tuple[int]): Depths of each Swin Transformer stage.
+ num_heads (tuple[int]): Number of attention head of each stage.
+ window_size (int): Window size. Default: 7.
+ mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4.
+ qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
+ qk_scale (float): Override default qk scale of head_dim ** -0.5 if set.
+ drop_rate (float): Dropout rate.
+ attn_drop_rate (float): Attention dropout rate. Default: 0.
+ drop_path_rate (float): Stochastic depth rate. Default: 0.2.
+ norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
+ ape (bool): If True, add absolute position embedding to the patch embedding. Default: False.
+ patch_norm (bool): If True, add normalization after patch embedding. Default: True.
+ out_indices (Sequence[int]): Output from which stages.
+ frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
+ -1 means not freezing any parameters.
+ use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
+ dilation (bool): If True, the final stage keeps a 16x output stride; otherwise the output is 32x downsampled.
+ """
+
+ def __init__(
+ self,
+ pretrain_img_size=224,
+ patch_size=4,
+ in_chans=3,
+ embed_dim=96,
+ depths=[2, 2, 6, 2],
+ num_heads=[3, 6, 12, 24],
+ window_size=7,
+ mlp_ratio=4.0,
+ qkv_bias=True,
+ qk_scale=None,
+ drop_rate=0.0,
+ attn_drop_rate=0.0,
+ drop_path_rate=0.2,
+ norm_layer=nn.LayerNorm,
+ ape=False,
+ patch_norm=True,
+ out_indices=(0, 1, 2, 3),
+ frozen_stages=-1,
+ dilation=False,
+ use_checkpoint=False,
+ ):
+ super().__init__()
+
+ self.pretrain_img_size = pretrain_img_size
+ self.num_layers = len(depths)
+ self.embed_dim = embed_dim
+ self.ape = ape
+ self.patch_norm = patch_norm
+ self.out_indices = out_indices
+ self.frozen_stages = frozen_stages
+ self.dilation = dilation
+
+
+ # split image into non-overlapping patches
+ self.patch_embed = PatchEmbed(
+ patch_size=patch_size,
+ in_chans=in_chans,
+ embed_dim=embed_dim,
+ norm_layer=norm_layer if self.patch_norm else None,
+ )
+
+ # absolute position embedding
+ if self.ape:
+ pretrain_img_size = to_2tuple(pretrain_img_size)
+ patch_size = to_2tuple(patch_size)
+ patches_resolution = [
+ pretrain_img_size[0] // patch_size[0],
+ pretrain_img_size[1] // patch_size[1],
+ ]
+
+ self.absolute_pos_embed = nn.Parameter(
+ torch.zeros(1, embed_dim, patches_resolution[0], patches_resolution[1])
+ )
+ trunc_normal_(self.absolute_pos_embed, std=0.02)
+
+ self.pos_drop = nn.Dropout(p=drop_rate)
+
+ # stochastic depth
+ dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule
+
+ # build layers
+ self.layers = nn.ModuleList()
+ # prepare downsample list
+ downsamplelist = [PatchMerging for i in range(self.num_layers)]
+ downsamplelist[-1] = None
+ num_features = [int(embed_dim * 2**i) for i in range(self.num_layers)]
+ if self.dilation:
+ downsamplelist[-2] = None
+ num_features[-1] = int(embed_dim * 2 ** (self.num_layers - 1)) // 2
+ for i_layer in range(self.num_layers):
+ layer = BasicLayer(
+ # dim=int(embed_dim * 2 ** i_layer),
+ dim=num_features[i_layer],
+ depth=depths[i_layer],
+ num_heads=num_heads[i_layer],
+ window_size=window_size,
+ mlp_ratio=mlp_ratio,
+ qkv_bias=qkv_bias,
+ qk_scale=qk_scale,
+ drop=drop_rate,
+ attn_drop=attn_drop_rate,
+ drop_path=dpr[sum(depths[:i_layer]) : sum(depths[: i_layer + 1])],
+ norm_layer=norm_layer,
+ # downsample=PatchMerging if (i_layer < self.num_layers - 1) else None,
+ downsample=downsamplelist[i_layer],
+ use_checkpoint=use_checkpoint,
+ )
+ self.layers.append(layer)
+
+ # num_features = [int(embed_dim * 2 ** i) for i in range(self.num_layers)]
+ self.num_features = num_features
+
+ # add a norm layer for each output
+ for i_layer in out_indices:
+ layer = norm_layer(num_features[i_layer])
+ layer_name = f"norm{i_layer}"
+ self.add_module(layer_name, layer)
+
+ self._freeze_stages()
+
+ def _freeze_stages(self):
+ if self.frozen_stages >= 0:
+ self.patch_embed.eval()
+ for param in self.patch_embed.parameters():
+ param.requires_grad = False
+
+ if self.frozen_stages >= 1 and self.ape:
+ self.absolute_pos_embed.requires_grad = False
+
+ if self.frozen_stages >= 2:
+ self.pos_drop.eval()
+ for i in range(0, self.frozen_stages - 1):
+ m = self.layers[i]
+ m.eval()
+ for param in m.parameters():
+ param.requires_grad = False
+
+ # def init_weights(self, pretrained=None):
+ # """Initialize the weights in backbone.
+ # Args:
+ # pretrained (str, optional): Path to pre-trained weights.
+ # Defaults to None.
+ # """
+
+ # def _init_weights(m):
+ # if isinstance(m, nn.Linear):
+ # trunc_normal_(m.weight, std=.02)
+ # if isinstance(m, nn.Linear) and m.bias is not None:
+ # nn.init.constant_(m.bias, 0)
+ # elif isinstance(m, nn.LayerNorm):
+ # nn.init.constant_(m.bias, 0)
+ # nn.init.constant_(m.weight, 1.0)
+
+ # if isinstance(pretrained, str):
+ # self.apply(_init_weights)
+ # logger = get_root_logger()
+ # load_checkpoint(self, pretrained, strict=False, logger=logger)
+ # elif pretrained is None:
+ # self.apply(_init_weights)
+ # else:
+ # raise TypeError('pretrained must be a str or None')
+
+ def forward_raw(self, x):
+ """Forward function."""
+ x = self.patch_embed(x)
+
+ Wh, Ww = x.size(2), x.size(3)
+ if self.ape:
+ # interpolate the position embedding to the corresponding size
+ absolute_pos_embed = F.interpolate(self.absolute_pos_embed, size=(Wh, Ww), mode="bicubic")
+ x = (x + absolute_pos_embed).flatten(2).transpose(1, 2) # B Wh*Ww C
+ else:
+ x = x.flatten(2).transpose(1, 2)
+ x = self.pos_drop(x)
+
+ outs = []
+ for i in range(self.num_layers):
+ layer = self.layers[i]
+ x_out, H, W, x, Wh, Ww = layer(x, Wh, Ww)
+
+ if i in self.out_indices:
+ norm_layer = getattr(self, f"norm{i}")
+ x_out = norm_layer(x_out)
+
+ out = x_out.view(-1, H, W, self.num_features[i]).permute(0, 3, 1, 2).contiguous()
+ outs.append(out)
+ # in:
+ # torch.Size([2, 3, 1024, 1024])
+ # outs:
+ # [torch.Size([2, 192, 256, 256]), torch.Size([2, 384, 128, 128]), \
+ # torch.Size([2, 768, 64, 64]), torch.Size([2, 1536, 32, 32])]
+ return tuple(outs)
+
+ def forward(self, tensor_list: NestedTensor):
+ """Forward function."""
+ x = tensor_list.tensors
+ x = self.patch_embed(x)
+
+ Wh, Ww = x.size(2), x.size(3)
+ if self.ape:
+ # interpolate the position embedding to the corresponding size
+ absolute_pos_embed = F.interpolate(self.absolute_pos_embed, size=(Wh, Ww), mode="bicubic")
+ x = (x + absolute_pos_embed).flatten(2).transpose(1, 2) # B Wh*Ww C
+ else:
+ x = x.flatten(2).transpose(1, 2)
+ x = self.pos_drop(x)
+
+ outs = []
+ for i in range(self.num_layers):
+ layer = self.layers[i]
+ x_out, H, W, x, Wh, Ww = layer(x, Wh, Ww)
+
+ if i in self.out_indices:
+ norm_layer = getattr(self, f"norm{i}")
+ x_out = norm_layer(x_out)
+
+ out = x_out.view(-1, H, W, self.num_features[i]).permute(0, 3, 1, 2).contiguous()
+ outs.append(out)
+ # in:
+ # torch.Size([2, 3, 1024, 1024])
+ # out:
+ # [torch.Size([2, 192, 256, 256]), torch.Size([2, 384, 128, 128]), \
+ # torch.Size([2, 768, 64, 64]), torch.Size([2, 1536, 32, 32])]
+
+ # collect the outputs into NestedTensors, resizing the padding mask per level
+ outs_dict = {}
+ for idx, out_i in enumerate(outs):
+ m = tensor_list.mask
+ assert m is not None
+ mask = F.interpolate(m[None].float(), size=out_i.shape[-2:]).to(torch.bool)[0]
+ outs_dict[idx] = NestedTensor(out_i, mask)
+
+ return outs_dict
+
+ def train(self, mode=True):
+ """Convert the model into training mode while keep layers freezed."""
+ super(SwinTransformer, self).train(mode)
+ self._freeze_stages()
+
+
+def build_swin_transformer(modelname, pretrain_img_size, **kw):
+ assert modelname in [
+ "swin_T_224_1k",
+ "swin_B_224_22k",
+ "swin_B_384_22k",
+ "swin_L_224_22k",
+ "swin_L_384_22k",
+ ]
+
+ model_para_dict = {
+ "swin_T_224_1k": dict(embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7),
+ "swin_B_224_22k": dict(embed_dim=128, depths=[2, 2, 18, 2], num_heads=[4, 8, 16, 32], window_size=7),
+ "swin_B_384_22k": dict(embed_dim=128, depths=[2, 2, 18, 2], num_heads=[4, 8, 16, 32], window_size=12),
+ "swin_L_224_22k": dict(embed_dim=192, depths=[2, 2, 18, 2], num_heads=[6, 12, 24, 48], window_size=7),
+ "swin_L_384_22k": dict(embed_dim=192, depths=[2, 2, 18, 2], num_heads=[6, 12, 24, 48], window_size=12),
+ }
+ kw_cfg = model_para_dict[modelname]
+ kw_cfg.update(kw)
+ model = SwinTransformer(pretrain_img_size=pretrain_img_size, **kw_cfg)
+ return model
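+
+# Usage sketch (illustrative):
+#   model = build_swin_transformer("swin_T_224_1k", 224, out_indices=(1, 2, 3))
+#   feats = model.forward_raw(torch.rand(2, 3, 224, 224))
+#   [tuple(f.shape) for f in feats]
+#   # [(2, 192, 28, 28), (2, 384, 14, 14), (2, 768, 7, 7)]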
+
+
+if __name__ == "__main__":
+ model = build_swin_transformer("swin_L_384_22k", 384, dilation=True)
+ x = torch.rand(2, 3, 1024, 1024)
+ y = model.forward_raw(x)
+ import ipdb
+
+ ipdb.set_trace()
+ x = torch.rand(2, 3, 384, 384)
+ y = model.forward_raw(x)
diff --git a/projects/PCSegSAM2/grounding_dino/groundingdino/models/GroundingDINO/bertwarper.py b/projects/PCSegSAM2/grounding_dino/groundingdino/models/GroundingDINO/bertwarper.py
new file mode 100644
index 00000000..0acfc06b
--- /dev/null
+++ b/projects/PCSegSAM2/grounding_dino/groundingdino/models/GroundingDINO/bertwarper.py
@@ -0,0 +1,258 @@
+# ------------------------------------------------------------------------
+# Grounding DINO
+# url: https://github.com/IDEA-Research/GroundingDINO
+# Copyright (c) 2023 IDEA. All Rights Reserved.
+# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
+# ------------------------------------------------------------------------
+
+import torch
+import torch.nn.functional as F
+import torch.utils.checkpoint as checkpoint
+from torch import Tensor, nn
+from torchvision.ops.boxes import nms
+from transformers import BertConfig, BertModel, BertPreTrainedModel
+from transformers.modeling_outputs import BaseModelOutputWithPoolingAndCrossAttentions
+
+
+class BertModelWarper(nn.Module):
+ def __init__(self, bert_model):
+ super().__init__()
+
+ self.config = bert_model.config
+ self.embeddings = bert_model.embeddings
+ self.encoder = bert_model.encoder
+ self.pooler = bert_model.pooler
+
+ self.get_extended_attention_mask = bert_model.get_extended_attention_mask
+ self.invert_attention_mask = bert_model.invert_attention_mask
+ self.get_head_mask = bert_model.get_head_mask
+
+ def forward(
+ self,
+ input_ids=None,
+ attention_mask=None,
+ token_type_ids=None,
+ position_ids=None,
+ head_mask=None,
+ inputs_embeds=None,
+ encoder_hidden_states=None,
+ encoder_attention_mask=None,
+ past_key_values=None,
+ use_cache=None,
+ output_attentions=None,
+ output_hidden_states=None,
+ return_dict=None,
+ ):
+ r"""
+ encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
+ the model is configured as a decoder.
+ encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
+ the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+ past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
+
+ If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
+ (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
+ instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
+ use_cache (:obj:`bool`, `optional`):
+ If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
+ decoding (see :obj:`past_key_values`).
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if self.config.is_decoder:
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ else:
+ use_cache = False
+
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ input_shape = input_ids.size()
+ batch_size, seq_length = input_shape
+ elif inputs_embeds is not None:
+ input_shape = inputs_embeds.size()[:-1]
+ batch_size, seq_length = input_shape
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
+
+ # past_key_values_length
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
+
+ if attention_mask is None:
+ attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
+ if token_type_ids is None:
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
+
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
+ # ourselves in which case we just need to make it broadcastable to all heads.
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)
+
+ # If a 2D or 3D attention mask is provided for the cross-attention
+ # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
+ if self.config.is_decoder and encoder_hidden_states is not None:
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
+ encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
+ if encoder_attention_mask is None:
+ encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
+ encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
+ else:
+ encoder_extended_attention_mask = None
+
+ # Prepare head mask if needed
+ # 1.0 in head_mask indicate we keep the head
+ # attention_probs has shape bsz x n_heads x N x N
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
+
+ embedding_output = self.embeddings(
+ input_ids=input_ids,
+ position_ids=position_ids,
+ token_type_ids=token_type_ids,
+ inputs_embeds=inputs_embeds,
+ past_key_values_length=past_key_values_length,
+ )
+
+ encoder_outputs = self.encoder(
+ embedding_output,
+ attention_mask=extended_attention_mask,
+ head_mask=head_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_extended_attention_mask,
+ past_key_values=past_key_values,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ sequence_output = encoder_outputs[0]
+ pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
+
+ if not return_dict:
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
+
+ return BaseModelOutputWithPoolingAndCrossAttentions(
+ last_hidden_state=sequence_output,
+ pooler_output=pooled_output,
+ past_key_values=encoder_outputs.past_key_values,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ cross_attentions=encoder_outputs.cross_attentions,
+ )
+
+
+class TextEncoderShell(nn.Module):
+ def __init__(self, text_encoder):
+ super().__init__()
+ self.text_encoder = text_encoder
+ self.config = self.text_encoder.config
+
+ def forward(self, **kw):
+ # feed into text encoder
+ return self.text_encoder(**kw)
+
+
+def generate_masks_with_special_tokens(tokenized, special_tokens_list, tokenizer):
+ """Generate attention mask between each pair of special tokens
+ Args:
+ input_ids (torch.Tensor): input ids. Shape: [bs, num_token]
+ special_tokens_mask (list): special tokens mask.
+ Returns:
+ torch.Tensor: attention mask between each special tokens.
+ """
+ input_ids = tokenized["input_ids"]
+ bs, num_token = input_ids.shape
+ # special_tokens_mask: bs, num_token. 1 for special tokens. 0 for normal tokens
+ special_tokens_mask = torch.zeros((bs, num_token), device=input_ids.device).bool()
+ for special_token in special_tokens_list:
+ special_tokens_mask |= input_ids == special_token
+
+ # idxs: each row is a list of indices of special tokens
+ idxs = torch.nonzero(special_tokens_mask)
+
+ # generate attention mask and positional ids
+ attention_mask = torch.eye(num_token, device=input_ids.device).bool().unsqueeze(0).repeat(bs, 1, 1)
+ position_ids = torch.zeros((bs, num_token), device=input_ids.device)
+ previous_col = 0
+ for i in range(idxs.shape[0]):
+ row, col = idxs[i]
+ if (col == 0) or (col == num_token - 1):
+ attention_mask[row, col, col] = True
+ position_ids[row, col] = 0
+ else:
+ attention_mask[row, previous_col + 1 : col + 1, previous_col + 1 : col + 1] = True
+ position_ids[row, previous_col + 1 : col + 1] = torch.arange(
+ 0, col - previous_col, device=input_ids.device
+ )
+
+ previous_col = col
+
+ # # padding mask
+ # padding_mask = tokenized['attention_mask']
+ # attention_mask = attention_mask & padding_mask.unsqueeze(1).bool() & padding_mask.unsqueeze(2).bool()
+
+ return attention_mask, position_ids.to(torch.long)
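+
+# Worked example (illustrative; token ids depend on the tokenizer, e.g. for
+# bert-base-uncased [CLS]=101, [SEP]=102, "."=1012 are typically passed as the
+# special tokens): for input_ids = [[CLS], cat, ., dog, ., [SEP]] the mask becomes
+# block-diagonal, so "cat ." and "dog ." only attend within their own phrase, and
+# position_ids restart at 0 inside each phrase: [0, 0, 1, 0, 1, 0].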
+
+
+def generate_masks_with_special_tokens_and_transfer_map(tokenized, special_tokens_list, tokenizer):
+ """Generate attention mask between each pair of special tokens
+ Args:
+ input_ids (torch.Tensor): input ids. Shape: [bs, num_token]
+ special_tokens_mask (list): special tokens mask.
+ Returns:
+ torch.Tensor: attention mask between each special tokens.
+ """
+ input_ids = tokenized["input_ids"]
+ bs, num_token = input_ids.shape
+ # special_tokens_mask: bs, num_token. 1 for special tokens. 0 for normal tokens
+ special_tokens_mask = torch.zeros((bs, num_token), device=input_ids.device).bool()
+ for special_token in special_tokens_list:
+ special_tokens_mask |= input_ids == special_token
+
+ # idxs: each row is a list of indices of special tokens
+ idxs = torch.nonzero(special_tokens_mask)
+
+ # generate attention mask and positional ids
+ attention_mask = torch.eye(num_token, device=input_ids.device).bool().unsqueeze(0).repeat(bs, 1, 1)
+ position_ids = torch.zeros((bs, num_token), device=input_ids.device)
+ cate_to_token_mask_list = [[] for _ in range(bs)]
+ previous_col = 0
+ for i in range(idxs.shape[0]):
+ row, col = idxs[i]
+ if (col == 0) or (col == num_token - 1):
+ attention_mask[row, col, col] = True
+ position_ids[row, col] = 0
+ else:
+ attention_mask[row, previous_col + 1 : col + 1, previous_col + 1 : col + 1] = True
+ position_ids[row, previous_col + 1 : col + 1] = torch.arange(
+ 0, col - previous_col, device=input_ids.device
+ )
+ c2t_maski = torch.zeros((num_token), device=input_ids.device).bool()
+ c2t_maski[previous_col + 1 : col] = True
+ cate_to_token_mask_list[row].append(c2t_maski)
+ previous_col = col
+
+ cate_to_token_mask_list = [
+ torch.stack(cate_to_token_mask_listi, dim=0) for cate_to_token_mask_listi in cate_to_token_mask_list
+ ]
+
+ # # padding mask
+ # padding_mask = tokenized['attention_mask']
+ # attention_mask = attention_mask & padding_mask.unsqueeze(1).bool() & padding_mask.unsqueeze(2).bool()
+
+ return attention_mask, position_ids.to(torch.long), cate_to_token_mask_list
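+
+# Note (illustrative): compared to the function above, this variant also returns,
+# per batch row, one boolean mask per category phrase marking its non-special
+# tokens; for [[CLS], cat, ., dog, ., [SEP]] that is one mask selecting "cat" and
+# one selecting "dog", presumably used downstream to map category labels to their
+# token spans.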
diff --git a/projects/PCSegSAM2/grounding_dino/groundingdino/models/GroundingDINO/csrc/MsDeformAttn/ms_deform_attn.h b/projects/PCSegSAM2/grounding_dino/groundingdino/models/GroundingDINO/csrc/MsDeformAttn/ms_deform_attn.h
new file mode 100644
index 00000000..17df7221
--- /dev/null
+++ b/projects/PCSegSAM2/grounding_dino/groundingdino/models/GroundingDINO/csrc/MsDeformAttn/ms_deform_attn.h
@@ -0,0 +1,64 @@
+/*!
+**************************************************************************************************
+* Deformable DETR
+* Copyright (c) 2020 SenseTime. All Rights Reserved.
+* Licensed under the Apache License, Version 2.0 [see LICENSE for details]
+**************************************************************************************************
+* Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
+**************************************************************************************************
+*/
+
+#pragma once
+
+#include "ms_deform_attn_cpu.h"
+
+#ifdef WITH_CUDA
+#include "ms_deform_attn_cuda.h"
+#endif
+
+namespace groundingdino {
+
+at::Tensor
+ms_deform_attn_forward(
+ const at::Tensor &value,
+ const at::Tensor &spatial_shapes,
+ const at::Tensor &level_start_index,
+ const at::Tensor &sampling_loc,
+ const at::Tensor &attn_weight,
+ const int im2col_step)
+{
+ if (value.type().is_cuda())
+ {
+#ifdef WITH_CUDA
+ return ms_deform_attn_cuda_forward(
+ value, spatial_shapes, level_start_index, sampling_loc, attn_weight, im2col_step);
+#else
+ AT_ERROR("Not compiled with GPU support");
+#endif
+ }
+ AT_ERROR("Not implemented on the CPU");
+}
+
+std::vector<at::Tensor>
+ms_deform_attn_backward(
+ const at::Tensor &value,
+ const at::Tensor &spatial_shapes,
+ const at::Tensor &level_start_index,
+ const at::Tensor &sampling_loc,
+ const at::Tensor &attn_weight,
+ const at::Tensor &grad_output,
+ const int im2col_step)
+{
+ if (value.type().is_cuda())
+ {
+#ifdef WITH_CUDA
+ return ms_deform_attn_cuda_backward(
+ value, spatial_shapes, level_start_index, sampling_loc, attn_weight, grad_output, im2col_step);
+#else
+ AT_ERROR("Not compiled with GPU support");
+#endif
+ }
+ AT_ERROR("Not implemented on the CPU");
+}
+
+} // namespace groundingdino
diff --git a/projects/PCSegSAM2/grounding_dino/groundingdino/models/GroundingDINO/csrc/MsDeformAttn/ms_deform_attn_cpu.cpp b/projects/PCSegSAM2/grounding_dino/groundingdino/models/GroundingDINO/csrc/MsDeformAttn/ms_deform_attn_cpu.cpp
new file mode 100644
index 00000000..8e7642ad
--- /dev/null
+++ b/projects/PCSegSAM2/grounding_dino/groundingdino/models/GroundingDINO/csrc/MsDeformAttn/ms_deform_attn_cpu.cpp
@@ -0,0 +1,43 @@
+/*!
+**************************************************************************************************
+* Deformable DETR
+* Copyright (c) 2020 SenseTime. All Rights Reserved.
+* Licensed under the Apache License, Version 2.0 [see LICENSE for details]
+**************************************************************************************************
+* Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
+**************************************************************************************************
+*/
+
+#include <vector>
+
+#include <ATen/ATen.h>
+#include <ATen/cuda/CUDAContext.h>
+
+namespace groundingdino {
+
+at::Tensor
+ms_deform_attn_cpu_forward(
+ const at::Tensor &value,
+ const at::Tensor &spatial_shapes,
+ const at::Tensor &level_start_index,
+ const at::Tensor &sampling_loc,
+ const at::Tensor &attn_weight,
+ const int im2col_step)
+{
+ AT_ERROR("Not implement on cpu");
+}
+
+std::vector<at::Tensor>
+ms_deform_attn_cpu_backward(
+ const at::Tensor &value,
+ const at::Tensor &spatial_shapes,
+ const at::Tensor &level_start_index,
+ const at::Tensor &sampling_loc,
+ const at::Tensor &attn_weight,
+ const at::Tensor &grad_output,
+ const int im2col_step)
+{
+ AT_ERROR("Not implement on cpu");
+}
+
+} // namespace groundingdino
diff --git a/projects/PCSegSAM2/grounding_dino/groundingdino/models/GroundingDINO/csrc/MsDeformAttn/ms_deform_attn_cpu.h b/projects/PCSegSAM2/grounding_dino/groundingdino/models/GroundingDINO/csrc/MsDeformAttn/ms_deform_attn_cpu.h
new file mode 100644
index 00000000..f3602619
--- /dev/null
+++ b/projects/PCSegSAM2/grounding_dino/groundingdino/models/GroundingDINO/csrc/MsDeformAttn/ms_deform_attn_cpu.h
@@ -0,0 +1,35 @@
+/*!
+**************************************************************************************************
+* Deformable DETR
+* Copyright (c) 2020 SenseTime. All Rights Reserved.
+* Licensed under the Apache License, Version 2.0 [see LICENSE for details]
+**************************************************************************************************
+* Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
+**************************************************************************************************
+*/
+
+#pragma once
+#include <torch/extension.h>
+
+namespace groundingdino {
+
+at::Tensor
+ms_deform_attn_cpu_forward(
+ const at::Tensor &value,
+ const at::Tensor &spatial_shapes,
+ const at::Tensor &level_start_index,
+ const at::Tensor &sampling_loc,
+ const at::Tensor &attn_weight,
+ const int im2col_step);
+
+std::vector<at::Tensor>
+ms_deform_attn_cpu_backward(
+ const at::Tensor &value,
+ const at::Tensor &spatial_shapes,
+ const at::Tensor &level_start_index,
+ const at::Tensor &sampling_loc,
+ const at::Tensor &attn_weight,
+ const at::Tensor &grad_output,
+ const int im2col_step);
+
+} // namespace groundingdino
diff --git a/projects/PCSegSAM2/grounding_dino/groundingdino/models/GroundingDINO/csrc/MsDeformAttn/ms_deform_attn_cuda.cu b/projects/PCSegSAM2/grounding_dino/groundingdino/models/GroundingDINO/csrc/MsDeformAttn/ms_deform_attn_cuda.cu
new file mode 100644
index 00000000..61aa8e49
--- /dev/null
+++ b/projects/PCSegSAM2/grounding_dino/groundingdino/models/GroundingDINO/csrc/MsDeformAttn/ms_deform_attn_cuda.cu
@@ -0,0 +1,156 @@
+/*!
+**************************************************************************************************
+* Deformable DETR
+* Copyright (c) 2020 SenseTime. All Rights Reserved.
+* Licensed under the Apache License, Version 2.0 [see LICENSE for details]
+**************************************************************************************************
+* Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
+**************************************************************************************************
+*/
+
+#include <vector>
+#include "ms_deform_im2col_cuda.cuh"
+
+#include <ATen/ATen.h>
+#include <ATen/cuda/CUDAContext.h>
+#include <cuda.h>
+#include <cuda_runtime.h>
+
+namespace groundingdino {
+
+at::Tensor ms_deform_attn_cuda_forward(
+ const at::Tensor &value,
+ const at::Tensor &spatial_shapes,
+ const at::Tensor &level_start_index,
+ const at::Tensor &sampling_loc,
+ const at::Tensor &attn_weight,
+ const int im2col_step)
+{
+ AT_ASSERTM(value.is_contiguous(), "value tensor has to be contiguous");
+ AT_ASSERTM(spatial_shapes.is_contiguous(), "spatial_shapes tensor has to be contiguous");
+ AT_ASSERTM(level_start_index.is_contiguous(), "level_start_index tensor has to be contiguous");
+ AT_ASSERTM(sampling_loc.is_contiguous(), "sampling_loc tensor has to be contiguous");
+ AT_ASSERTM(attn_weight.is_contiguous(), "attn_weight tensor has to be contiguous");
+
+ AT_ASSERTM(value.type().is_cuda(), "value must be a CUDA tensor");
+ AT_ASSERTM(spatial_shapes.type().is_cuda(), "spatial_shapes must be a CUDA tensor");
+ AT_ASSERTM(level_start_index.type().is_cuda(), "level_start_index must be a CUDA tensor");
+ AT_ASSERTM(sampling_loc.type().is_cuda(), "sampling_loc must be a CUDA tensor");
+ AT_ASSERTM(attn_weight.type().is_cuda(), "attn_weight must be a CUDA tensor");
+
+ const int batch = value.size(0);
+ const int spatial_size = value.size(1);
+ const int num_heads = value.size(2);
+ const int channels = value.size(3);
+
+ const int num_levels = spatial_shapes.size(0);
+
+ const int num_query = sampling_loc.size(1);
+ const int num_point = sampling_loc.size(4);
+
+ const int im2col_step_ = std::min(batch, im2col_step);
+
+ AT_ASSERTM(batch % im2col_step_ == 0, "batch(%d) must be divisible by im2col_step(%d)", batch, im2col_step_);
+
+ auto output = at::zeros({batch, num_query, num_heads, channels}, value.options());
+
+ const int batch_n = im2col_step_;
+ auto output_n = output.view({batch/im2col_step_, batch_n, num_query, num_heads, channels});
+ auto per_value_size = spatial_size * num_heads * channels;
+ auto per_sample_loc_size = num_query * num_heads * num_levels * num_point * 2;
+ auto per_attn_weight_size = num_query * num_heads * num_levels * num_point;
+ for (int n = 0; n < batch/im2col_step_; ++n)
+ {
+ auto columns = output_n.select(0, n);
+ AT_DISPATCH_FLOATING_TYPES(value.scalar_type(), "ms_deform_attn_forward_cuda", ([&] {
+ ms_deformable_im2col_cuda(at::cuda::getCurrentCUDAStream(),
+ value.data<scalar_t>() + n * im2col_step_ * per_value_size,
+ spatial_shapes.data<int64_t>(),
+ level_start_index.data<int64_t>(),
+ sampling_loc.data<scalar_t>() + n * im2col_step_ * per_sample_loc_size,
+ attn_weight.data<scalar_t>() + n * im2col_step_ * per_attn_weight_size,
+ batch_n, spatial_size, num_heads, channels, num_levels, num_query, num_point,
+ columns.data<scalar_t>());
+
+ }));
+ }
+
+ output = output.view({batch, num_query, num_heads*channels});
+
+ return output;
+}
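+
+// Note (illustrative): the batch is processed in chunks of im2col_step_ =
+// min(batch, im2col_step); e.g. batch = 4 with im2col_step = 2 launches the
+// im2col kernel twice, each over a (2, num_query, num_heads, channels) view of
+// the output, so each launch covers a fixed-size slice of the batch.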
+
+
+std::vector<at::Tensor> ms_deform_attn_cuda_backward(
+ const at::Tensor &value,
+ const at::Tensor &spatial_shapes,
+ const at::Tensor &level_start_index,
+ const at::Tensor &sampling_loc,
+ const at::Tensor &attn_weight,
+ const at::Tensor &grad_output,
+ const int im2col_step)
+{
+
+ AT_ASSERTM(value.is_contiguous(), "value tensor has to be contiguous");
+ AT_ASSERTM(spatial_shapes.is_contiguous(), "spatial_shapes tensor has to be contiguous");
+ AT_ASSERTM(level_start_index.is_contiguous(), "level_start_index tensor has to be contiguous");
+ AT_ASSERTM(sampling_loc.is_contiguous(), "sampling_loc tensor has to be contiguous");
+ AT_ASSERTM(attn_weight.is_contiguous(), "attn_weight tensor has to be contiguous");
+ AT_ASSERTM(grad_output.is_contiguous(), "grad_output tensor has to be contiguous");
+
+ AT_ASSERTM(value.type().is_cuda(), "value must be a CUDA tensor");
+ AT_ASSERTM(spatial_shapes.type().is_cuda(), "spatial_shapes must be a CUDA tensor");
+ AT_ASSERTM(level_start_index.type().is_cuda(), "level_start_index must be a CUDA tensor");
+ AT_ASSERTM(sampling_loc.type().is_cuda(), "sampling_loc must be a CUDA tensor");
+ AT_ASSERTM(attn_weight.type().is_cuda(), "attn_weight must be a CUDA tensor");
+ AT_ASSERTM(grad_output.type().is_cuda(), "grad_output must be a CUDA tensor");
+
+ const int batch = value.size(0);
+ const int spatial_size = value.size(1);
+ const int num_heads = value.size(2);
+ const int channels = value.size(3);
+
+ const int num_levels = spatial_shapes.size(0);
+
+ const int num_query = sampling_loc.size(1);
+ const int num_point = sampling_loc.size(4);
+
+ const int im2col_step_ = std::min(batch, im2col_step);
+
+ AT_ASSERTM(batch % im2col_step_ == 0, "batch(%d) must be divisible by im2col_step(%d)", batch, im2col_step_);
+
+ auto grad_value = at::zeros_like(value);
+ auto grad_sampling_loc = at::zeros_like(sampling_loc);
+ auto grad_attn_weight = at::zeros_like(attn_weight);
+
+ const int batch_n = im2col_step_;
+ auto per_value_size = spatial_size * num_heads * channels;
+ auto per_sample_loc_size = num_query * num_heads * num_levels * num_point * 2;
+ auto per_attn_weight_size = num_query * num_heads * num_levels * num_point;
+ auto grad_output_n = grad_output.view({batch/im2col_step_, batch_n, num_query, num_heads, channels});
+
+ for (int n = 0; n < batch/im2col_step_; ++n)
+ {
+ auto grad_output_g = grad_output_n.select(0, n);
+ AT_DISPATCH_FLOATING_TYPES(value.scalar_type(), "ms_deform_attn_backward_cuda", ([&] {
+ ms_deformable_col2im_cuda(at::cuda::getCurrentCUDAStream(),
+ grad_output_g.data<scalar_t>(),
+ value.data<scalar_t>() + n * im2col_step_ * per_value_size,
+ spatial_shapes.data<int64_t>(),
+ level_start_index.data<int64_t>(),
+ sampling_loc.data<scalar_t>() + n * im2col_step_ * per_sample_loc_size,
+ attn_weight.data<scalar_t>() + n * im2col_step_ * per_attn_weight_size,
+ batch_n, spatial_size, num_heads, channels, num_levels, num_query, num_point,
+ grad_value.data<scalar_t>() + n * im2col_step_ * per_value_size,
+ grad_sampling_loc.data<scalar_t>() + n * im2col_step_ * per_sample_loc_size,
+ grad_attn_weight.data<scalar_t>() + n * im2col_step_ * per_attn_weight_size);
+
+ }));
+ }
+
+ return {
+ grad_value, grad_sampling_loc, grad_attn_weight
+ };
+}
+
+} // namespace groundingdino
diff --git a/projects/PCSegSAM2/grounding_dino/groundingdino/models/GroundingDINO/csrc/MsDeformAttn/ms_deform_attn_cuda.h b/projects/PCSegSAM2/grounding_dino/groundingdino/models/GroundingDINO/csrc/MsDeformAttn/ms_deform_attn_cuda.h
new file mode 100644
index 00000000..b5bbb147
--- /dev/null
+++ b/projects/PCSegSAM2/grounding_dino/groundingdino/models/GroundingDINO/csrc/MsDeformAttn/ms_deform_attn_cuda.h
@@ -0,0 +1,33 @@
+/*!
+**************************************************************************************************
+* Deformable DETR
+* Copyright (c) 2020 SenseTime. All Rights Reserved.
+* Licensed under the Apache License, Version 2.0 [see LICENSE for details]
+**************************************************************************************************
+* Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
+**************************************************************************************************
+*/
+
+#pragma once
+#include <torch/extension.h>
+
+namespace groundingdino {
+
+at::Tensor ms_deform_attn_cuda_forward(
+ const at::Tensor &value,
+ const at::Tensor &spatial_shapes,
+ const at::Tensor &level_start_index,
+ const at::Tensor &sampling_loc,
+ const at::Tensor &attn_weight,
+ const int im2col_step);
+
+std::vector<at::Tensor> ms_deform_attn_cuda_backward(
+ const at::Tensor &value,
+ const at::Tensor &spatial_shapes,
+ const at::Tensor &level_start_index,
+ const at::Tensor &sampling_loc,
+ const at::Tensor &attn_weight,
+ const at::Tensor &grad_output,
+ const int im2col_step);
+
+} // namespace groundingdino
diff --git a/projects/PCSegSAM2/grounding_dino/groundingdino/models/GroundingDINO/csrc/MsDeformAttn/ms_deform_im2col_cuda.cuh b/projects/PCSegSAM2/grounding_dino/groundingdino/models/GroundingDINO/csrc/MsDeformAttn/ms_deform_im2col_cuda.cuh
new file mode 100644
index 00000000..e8c08516
--- /dev/null
+++ b/projects/PCSegSAM2/grounding_dino/groundingdino/models/GroundingDINO/csrc/MsDeformAttn/ms_deform_im2col_cuda.cuh
@@ -0,0 +1,1327 @@
+/*!
+**************************************************************************
+* Deformable DETR
+* Copyright (c) 2020 SenseTime. All Rights Reserved.
+* Licensed under the Apache License, Version 2.0 [see LICENSE for details]
+**************************************************************************
+* Modified from DCN (https://github.com/msracver/Deformable-ConvNets)
+* Copyright (c) 2018 Microsoft
+**************************************************************************
+*/
+
+#include <cstdio>
+#include <algorithm>
+#include <cstring>
+
+#include <ATen/ATen.h>
+#include <ATen/cuda/CUDAContext.h>
+
+#include <ATen/cuda/Atomic.cuh>
+
+#define CUDA_KERNEL_LOOP(i, n) \
+ for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
+ i < (n); \
+ i += blockDim.x * gridDim.x)
+
+const int CUDA_NUM_THREADS = 1024;
+inline int GET_BLOCKS(const int N, const int num_threads)
+{
+ return (N + num_threads - 1) / num_threads;
+}
+
+
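+// Bilinear sampling of bottom_data at the fractional location (h, w) for head m
+// and channel c; corners that fall outside the feature map contribute zero.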
+template <typename scalar_t>
+__device__ scalar_t ms_deform_attn_im2col_bilinear(const scalar_t* &bottom_data,
+ const int &height, const int &width, const int &nheads, const int &channels,
+ const scalar_t &h, const scalar_t &w, const int &m, const int &c)
+{
+ const int h_low = floor(h);
+ const int w_low = floor(w);
+ const int h_high = h_low + 1;
+ const int w_high = w_low + 1;
+
+ const scalar_t lh = h - h_low;
+ const scalar_t lw = w - w_low;
+ const scalar_t hh = 1 - lh, hw = 1 - lw;
+
+ const int w_stride = nheads * channels;
+ const int h_stride = width * w_stride;
+ const int h_low_ptr_offset = h_low * h_stride;
+ const int h_high_ptr_offset = h_low_ptr_offset + h_stride;
+ const int w_low_ptr_offset = w_low * w_stride;
+ const int w_high_ptr_offset = w_low_ptr_offset + w_stride;
+ const int base_ptr = m * channels + c;
+
+ scalar_t v1 = 0;
+ if (h_low >= 0 && w_low >= 0)
+ {
+ const int ptr1 = h_low_ptr_offset + w_low_ptr_offset + base_ptr;
+ v1 = bottom_data[ptr1];
+ }
+ scalar_t v2 = 0;
+ if (h_low >= 0 && w_high <= width - 1)
+ {
+ const int ptr2 = h_low_ptr_offset + w_high_ptr_offset + base_ptr;
+ v2 = bottom_data[ptr2];
+ }
+ scalar_t v3 = 0;
+ if (h_high <= height - 1 && w_low >= 0)
+ {
+ const int ptr3 = h_high_ptr_offset + w_low_ptr_offset + base_ptr;
+ v3 = bottom_data[ptr3];
+ }
+ scalar_t v4 = 0;
+ if (h_high <= height - 1 && w_high <= width - 1)
+ {
+ const int ptr4 = h_high_ptr_offset + w_high_ptr_offset + base_ptr;
+ v4 = bottom_data[ptr4];
+ }
+
+ const scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
+
+ const scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
+ return val;
+}
+
+
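+// Backward of the bilinear sampling above: scatters the upstream gradient into
+// grad_value via atomicAdd and writes this thread's sampling-location and
+// attention-weight gradients (reduced across the block by the caller kernels).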
+template <typename scalar_t>
+__device__ void ms_deform_attn_col2im_bilinear(const scalar_t* &bottom_data,
+ const int &height, const int &width, const int &nheads, const int &channels,
+ const scalar_t &h, const scalar_t &w, const int &m, const int &c,
+ const scalar_t &top_grad,
+ const scalar_t &attn_weight,
+ scalar_t* &grad_value,
+ scalar_t* grad_sampling_loc,
+ scalar_t* grad_attn_weight)
+{
+ const int h_low = floor(h);
+ const int w_low = floor(w);
+ const int h_high = h_low + 1;
+ const int w_high = w_low + 1;
+
+ const scalar_t lh = h - h_low;
+ const scalar_t lw = w - w_low;
+ const scalar_t hh = 1 - lh, hw = 1 - lw;
+
+ const int w_stride = nheads * channels;
+ const int h_stride = width * w_stride;
+ const int h_low_ptr_offset = h_low * h_stride;
+ const int h_high_ptr_offset = h_low_ptr_offset + h_stride;
+ const int w_low_ptr_offset = w_low * w_stride;
+ const int w_high_ptr_offset = w_low_ptr_offset + w_stride;
+ const int base_ptr = m * channels + c;
+
+ const scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
+ const scalar_t top_grad_value = top_grad * attn_weight;
+ scalar_t grad_h_weight = 0, grad_w_weight = 0;
+
+ scalar_t v1 = 0;
+ if (h_low >= 0 && w_low >= 0)
+ {
+ const int ptr1 = h_low_ptr_offset + w_low_ptr_offset + base_ptr;
+ v1 = bottom_data[ptr1];
+ grad_h_weight -= hw * v1;
+ grad_w_weight -= hh * v1;
+ atomicAdd(grad_value+ptr1, w1*top_grad_value);
+ }
+ scalar_t v2 = 0;
+ if (h_low >= 0 && w_high <= width - 1)
+ {
+ const int ptr2 = h_low_ptr_offset + w_high_ptr_offset + base_ptr;
+ v2 = bottom_data[ptr2];
+ grad_h_weight -= lw * v2;
+ grad_w_weight += hh * v2;
+ atomicAdd(grad_value+ptr2, w2*top_grad_value);
+ }
+ scalar_t v3 = 0;
+ if (h_high <= height - 1 && w_low >= 0)
+ {
+ const int ptr3 = h_high_ptr_offset + w_low_ptr_offset + base_ptr;
+ v3 = bottom_data[ptr3];
+ grad_h_weight += hw * v3;
+ grad_w_weight -= lh * v3;
+ atomicAdd(grad_value+ptr3, w3*top_grad_value);
+ }
+ scalar_t v4 = 0;
+ if (h_high <= height - 1 && w_high <= width - 1)
+ {
+ const int ptr4 = h_high_ptr_offset + w_high_ptr_offset + base_ptr;
+ v4 = bottom_data[ptr4];
+ grad_h_weight += lw * v4;
+ grad_w_weight += lh * v4;
+ atomicAdd(grad_value+ptr4, w4*top_grad_value);
+ }
+
+ const scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
+ *grad_attn_weight = top_grad * val;
+ *grad_sampling_loc = width * grad_w_weight * top_grad_value;
+ *(grad_sampling_loc + 1) = height * grad_h_weight * top_grad_value;
+}
+
+
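+// Same backward math as above; this _gm variant accumulates the sampling-location
+// and attention-weight gradients directly in global memory with atomicAdd, the
+// fallback when no block-level shared-memory reduction is performed.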
+template <typename scalar_t>
+__device__ void ms_deform_attn_col2im_bilinear_gm(const scalar_t* &bottom_data,
+ const int &height, const int &width, const int &nheads, const int &channels,
+ const scalar_t &h, const scalar_t &w, const int &m, const int &c,
+ const scalar_t &top_grad,
+ const scalar_t &attn_weight,
+ scalar_t* &grad_value,
+ scalar_t* grad_sampling_loc,
+ scalar_t* grad_attn_weight)
+{
+ const int h_low = floor(h);
+ const int w_low = floor(w);
+ const int h_high = h_low + 1;
+ const int w_high = w_low + 1;
+
+ const scalar_t lh = h - h_low;
+ const scalar_t lw = w - w_low;
+ const scalar_t hh = 1 - lh, hw = 1 - lw;
+
+ const int w_stride = nheads * channels;
+ const int h_stride = width * w_stride;
+ const int h_low_ptr_offset = h_low * h_stride;
+ const int h_high_ptr_offset = h_low_ptr_offset + h_stride;
+ const int w_low_ptr_offset = w_low * w_stride;
+ const int w_high_ptr_offset = w_low_ptr_offset + w_stride;
+ const int base_ptr = m * channels + c;
+
+ const scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
+ const scalar_t top_grad_value = top_grad * attn_weight;
+ scalar_t grad_h_weight = 0, grad_w_weight = 0;
+
+ scalar_t v1 = 0;
+ if (h_low >= 0 && w_low >= 0)
+ {
+ const int ptr1 = h_low_ptr_offset + w_low_ptr_offset + base_ptr;
+ v1 = bottom_data[ptr1];
+ grad_h_weight -= hw * v1;
+ grad_w_weight -= hh * v1;
+ atomicAdd(grad_value+ptr1, w1*top_grad_value);
+ }
+ scalar_t v2 = 0;
+ if (h_low >= 0 && w_high <= width - 1)
+ {
+ const int ptr2 = h_low_ptr_offset + w_high_ptr_offset + base_ptr;
+ v2 = bottom_data[ptr2];
+ grad_h_weight -= lw * v2;
+ grad_w_weight += hh * v2;
+ atomicAdd(grad_value+ptr2, w2*top_grad_value);
+ }
+ scalar_t v3 = 0;
+ if (h_high <= height - 1 && w_low >= 0)
+ {
+ const int ptr3 = h_high_ptr_offset + w_low_ptr_offset + base_ptr;
+ v3 = bottom_data[ptr3];
+ grad_h_weight += hw * v3;
+ grad_w_weight -= lh * v3;
+ atomicAdd(grad_value+ptr3, w3*top_grad_value);
+ }
+ scalar_t v4 = 0;
+ if (h_high <= height - 1 && w_high <= width - 1)
+ {
+ const int ptr4 = h_high_ptr_offset + w_high_ptr_offset + base_ptr;
+ v4 = bottom_data[ptr4];
+ grad_h_weight += lw * v4;
+ grad_w_weight += lh * v4;
+ atomicAdd(grad_value+ptr4, w4*top_grad_value);
+ }
+
+ const scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
+ atomicAdd(grad_attn_weight, top_grad * val);
+ atomicAdd(grad_sampling_loc, width * grad_w_weight * top_grad_value);
+ atomicAdd(grad_sampling_loc + 1, height * grad_h_weight * top_grad_value);
+}
+
+
+template <typename scalar_t>
+__global__ void ms_deformable_im2col_gpu_kernel(const int n,
+ const scalar_t *data_value,
+ const int64_t *data_spatial_shapes,
+ const int64_t *data_level_start_index,
+ const scalar_t *data_sampling_loc,
+ const scalar_t *data_attn_weight,
+ const int batch_size,
+ const int spatial_size,
+ const int num_heads,
+ const int channels,
+ const int num_levels,
+ const int num_query,
+ const int num_point,
+ scalar_t *data_col)
+{
+ CUDA_KERNEL_LOOP(index, n)
+ {
+ int _temp = index;
+ const int c_col = _temp % channels;
+ _temp /= channels;
+ const int sampling_index = _temp;
+ const int m_col = _temp % num_heads;
+ _temp /= num_heads;
+ const int q_col = _temp % num_query;
+ _temp /= num_query;
+ const int b_col = _temp;
+
+ scalar_t *data_col_ptr = data_col + index;
+ int data_weight_ptr = sampling_index * num_levels * num_point;
+ int data_loc_w_ptr = data_weight_ptr << 1;
+ const int qid_stride = num_heads * channels;
+ const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride;
+ scalar_t col = 0;
+
+ for (int l_col=0; l_col < num_levels; ++l_col)
+ {
+ const int level_start_id = data_level_start_index[l_col];
+ const int spatial_h_ptr = l_col << 1;
+ const int spatial_h = data_spatial_shapes[spatial_h_ptr];
+ const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1];
+ const scalar_t *data_value_ptr = data_value + (data_value_ptr_init_offset + level_start_id * qid_stride);
+ for (int p_col=0; p_col < num_point; ++p_col)
+ {
+ const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr];
+ const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1];
+ const scalar_t weight = data_attn_weight[data_weight_ptr];
+
+ const scalar_t h_im = loc_h * spatial_h - 0.5;
+ const scalar_t w_im = loc_w * spatial_w - 0.5;
+
+ if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w)
+ {
+ col += ms_deform_attn_im2col_bilinear(data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col) * weight;
+ }
+
+ data_weight_ptr += 1;
+ data_loc_w_ptr += 2;
+ }
+ }
+ *data_col_ptr = col;
+ }
+}
+
+template <typename scalar_t, unsigned int blockSize>
+__global__ void ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1(const int n,
+ const scalar_t *grad_col,
+ const scalar_t *data_value,
+ const int64_t *data_spatial_shapes,
+ const int64_t *data_level_start_index,
+ const scalar_t *data_sampling_loc,
+ const scalar_t *data_attn_weight,
+ const int batch_size,
+ const int spatial_size,
+ const int num_heads,
+ const int channels,
+ const int num_levels,
+ const int num_query,
+ const int num_point,
+ scalar_t *grad_value,
+ scalar_t *grad_sampling_loc,
+ scalar_t *grad_attn_weight)
+{
+ CUDA_KERNEL_LOOP(index, n)
+ {
+ __shared__ scalar_t cache_grad_sampling_loc[blockSize * 2];
+ __shared__ scalar_t cache_grad_attn_weight[blockSize];
+ unsigned int tid = threadIdx.x;
+ int _temp = index;
+ const int c_col = _temp % channels;
+ _temp /= channels;
+ const int sampling_index = _temp;
+ const int m_col = _temp % num_heads;
+ _temp /= num_heads;
+ const int q_col = _temp % num_query;
+ _temp /= num_query;
+ const int b_col = _temp;
+
+ const scalar_t top_grad = grad_col[index];
+
+ int data_weight_ptr = sampling_index * num_levels * num_point;
+ int data_loc_w_ptr = data_weight_ptr << 1;
+ const int grad_sampling_ptr = data_weight_ptr;
+ grad_sampling_loc += grad_sampling_ptr << 1;
+ grad_attn_weight += grad_sampling_ptr;
+ const int grad_weight_stride = 1;
+ const int grad_loc_stride = 2;
+ const int qid_stride = num_heads * channels;
+ const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride;
+
+ for (int l_col=0; l_col < num_levels; ++l_col)
+ {
+ const int level_start_id = data_level_start_index[l_col];
+ const int spatial_h_ptr = l_col << 1;
+ const int spatial_h = data_spatial_shapes[spatial_h_ptr];
+ const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1];
+ const int value_ptr_offset = data_value_ptr_init_offset + level_start_id * qid_stride;
+ const scalar_t *data_value_ptr = data_value + value_ptr_offset;
+ scalar_t *grad_value_ptr = grad_value + value_ptr_offset;
+
+ for (int p_col=0; p_col < num_point; ++p_col)
+ {
+ const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr];
+ const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1];
+ const scalar_t weight = data_attn_weight[data_weight_ptr];
+
+ const scalar_t h_im = loc_h * spatial_h - 0.5;
+ const scalar_t w_im = loc_w * spatial_w - 0.5;
+ *(cache_grad_sampling_loc+(threadIdx.x << 1)) = 0;
+ *(cache_grad_sampling_loc+((threadIdx.x << 1) + 1)) = 0;
+ *(cache_grad_attn_weight+threadIdx.x)=0;
+ if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w)
+ {
+ ms_deform_attn_col2im_bilinear(
+ data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col,
+ top_grad, weight, grad_value_ptr,
+ cache_grad_sampling_loc+(threadIdx.x << 1), cache_grad_attn_weight+threadIdx.x);
+ }
+
+ __syncthreads();
+ if (tid == 0)
+ {
+ scalar_t _grad_w=cache_grad_sampling_loc[0], _grad_h=cache_grad_sampling_loc[1], _grad_a=cache_grad_attn_weight[0];
+ int sid=2;
+ for (unsigned int tid = 1; tid < blockSize; ++tid)
+ {
+ _grad_w += cache_grad_sampling_loc[sid];
+ _grad_h += cache_grad_sampling_loc[sid + 1];
+ _grad_a += cache_grad_attn_weight[tid];
+ sid += 2;
+ }
+
+
+ *grad_sampling_loc = _grad_w;
+ *(grad_sampling_loc + 1) = _grad_h;
+ *grad_attn_weight = _grad_a;
+ }
+ __syncthreads();
+
+ data_weight_ptr += 1;
+ data_loc_w_ptr += 2;
+ grad_attn_weight += grad_weight_stride;
+ grad_sampling_loc += grad_loc_stride;
+ }
+ }
+ }
+}
+
+
+template <typename scalar_t, unsigned int blockSize>
+__global__ void ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2(const int n,
+ const scalar_t *grad_col,
+ const scalar_t *data_value,
+ const int64_t *data_spatial_shapes,
+ const int64_t *data_level_start_index,
+ const scalar_t *data_sampling_loc,
+ const scalar_t *data_attn_weight,
+ const int batch_size,
+ const int spatial_size,
+ const int num_heads,
+ const int channels,
+ const int num_levels,
+ const int num_query,
+ const int num_point,
+ scalar_t *grad_value,
+ scalar_t *grad_sampling_loc,
+ scalar_t *grad_attn_weight)
+{
+ CUDA_KERNEL_LOOP(index, n)
+ {
+ __shared__ scalar_t cache_grad_sampling_loc[blockSize * 2];
+ __shared__ scalar_t cache_grad_attn_weight[blockSize];
+ unsigned int tid = threadIdx.x;
+ int _temp = index;
+ const int c_col = _temp % channels;
+ _temp /= channels;
+ const int sampling_index = _temp;
+ const int m_col = _temp % num_heads;
+ _temp /= num_heads;
+ const int q_col = _temp % num_query;
+ _temp /= num_query;
+ const int b_col = _temp;
+
+ const scalar_t top_grad = grad_col[index];
+
+ int data_weight_ptr = sampling_index * num_levels * num_point;
+ int data_loc_w_ptr = data_weight_ptr << 1;
+ const int grad_sampling_ptr = data_weight_ptr;
+ grad_sampling_loc += grad_sampling_ptr << 1;
+ grad_attn_weight += grad_sampling_ptr;
+ const int grad_weight_stride = 1;
+ const int grad_loc_stride = 2;
+ const int qid_stride = num_heads * channels;
+ const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride;
+
+ for (int l_col=0; l_col < num_levels; ++l_col)
+ {
+ const int level_start_id = data_level_start_index[l_col];
+ const int spatial_h_ptr = l_col << 1;
+ const int spatial_h = data_spatial_shapes[spatial_h_ptr];
+ const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1];
+ const int value_ptr_offset = data_value_ptr_init_offset + level_start_id * qid_stride;
+ const scalar_t *data_value_ptr = data_value + value_ptr_offset;
+ scalar_t *grad_value_ptr = grad_value + value_ptr_offset;
+
+ for (int p_col=0; p_col < num_point; ++p_col)
+ {
+ const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr];
+ const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1];
+ const scalar_t weight = data_attn_weight[data_weight_ptr];
+
+ const scalar_t h_im = loc_h * spatial_h - 0.5;
+ const scalar_t w_im = loc_w * spatial_w - 0.5;
+ *(cache_grad_sampling_loc+(threadIdx.x << 1)) = 0;
+ *(cache_grad_sampling_loc+((threadIdx.x << 1) + 1)) = 0;
+ *(cache_grad_attn_weight+threadIdx.x)=0;
+ if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w)
+ {
+ ms_deform_attn_col2im_bilinear(
+ data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col,
+ top_grad, weight, grad_value_ptr,
+ cache_grad_sampling_loc+(threadIdx.x << 1), cache_grad_attn_weight+threadIdx.x);
+ }
+
+ __syncthreads();
+
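+          // Tree reduction in shared memory: each step folds the upper half of the
+          // active threads into the lower half until thread 0 holds the block sum.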
+ for (unsigned int s=blockSize/2; s>0; s>>=1)
+ {
+ if (tid < s) {
+ const unsigned int xid1 = tid << 1;
+ const unsigned int xid2 = (tid + s) << 1;
+ cache_grad_attn_weight[tid] += cache_grad_attn_weight[tid + s];
+ cache_grad_sampling_loc[xid1] += cache_grad_sampling_loc[xid2];
+ cache_grad_sampling_loc[xid1 + 1] += cache_grad_sampling_loc[xid2 + 1];
+ }
+ __syncthreads();
+ }
+
+ if (tid == 0)
+ {
+ *grad_sampling_loc = cache_grad_sampling_loc[0];
+ *(grad_sampling_loc + 1) = cache_grad_sampling_loc[1];
+ *grad_attn_weight = cache_grad_attn_weight[0];
+ }
+ __syncthreads();
+
+ data_weight_ptr += 1;
+ data_loc_w_ptr += 2;
+ grad_attn_weight += grad_weight_stride;
+ grad_sampling_loc += grad_loc_stride;
+ }
+ }
+ }
+}
+
+
+template <typename scalar_t>
+__global__ void ms_deformable_col2im_gpu_kernel_shm_reduce_v1(const int n,
+ const scalar_t *grad_col,
+ const scalar_t *data_value,
+ const int64_t *data_spatial_shapes,
+ const int64_t *data_level_start_index,
+ const scalar_t *data_sampling_loc,
+ const scalar_t *data_attn_weight,
+ const int batch_size,
+ const int spatial_size,
+ const int num_heads,
+ const int channels,
+ const int num_levels,
+ const int num_query,
+ const int num_point,
+ scalar_t *grad_value,
+ scalar_t *grad_sampling_loc,
+ scalar_t *grad_attn_weight)
+{
+ CUDA_KERNEL_LOOP(index, n)
+ {
+ extern __shared__ int _s[];
+ scalar_t* cache_grad_sampling_loc = (scalar_t*)_s;
+ scalar_t* cache_grad_attn_weight = cache_grad_sampling_loc + 2 * blockDim.x;
+ unsigned int tid = threadIdx.x;
+ int _temp = index;
+ const int c_col = _temp % channels;
+ _temp /= channels;
+ const int sampling_index = _temp;
+ const int m_col = _temp % num_heads;
+ _temp /= num_heads;
+ const int q_col = _temp % num_query;
+ _temp /= num_query;
+ const int b_col = _temp;
+
+ const scalar_t top_grad = grad_col[index];
+
+ int data_weight_ptr = sampling_index * num_levels * num_point;
+ int data_loc_w_ptr = data_weight_ptr << 1;
+ const int grad_sampling_ptr = data_weight_ptr;
+ grad_sampling_loc += grad_sampling_ptr << 1;
+ grad_attn_weight += grad_sampling_ptr;
+ const int grad_weight_stride = 1;
+ const int grad_loc_stride = 2;
+ const int qid_stride = num_heads * channels;
+ const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride;
+
+ for (int l_col=0; l_col < num_levels; ++l_col)
+ {
+ const int level_start_id = data_level_start_index[l_col];
+ const int spatial_h_ptr = l_col << 1;
+ const int spatial_h = data_spatial_shapes[spatial_h_ptr];
+ const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1];
+ const int value_ptr_offset = data_value_ptr_init_offset + level_start_id * qid_stride;
+ const scalar_t *data_value_ptr = data_value + value_ptr_offset;
+ scalar_t *grad_value_ptr = grad_value + value_ptr_offset;
+
+ for (int p_col=0; p_col < num_point; ++p_col)
+ {
+ const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr];
+ const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1];
+ const scalar_t weight = data_attn_weight[data_weight_ptr];
+
+ const scalar_t h_im = loc_h * spatial_h - 0.5;
+ const scalar_t w_im = loc_w * spatial_w - 0.5;
+ *(cache_grad_sampling_loc+(threadIdx.x << 1)) = 0;
+ *(cache_grad_sampling_loc+((threadIdx.x << 1) + 1)) = 0;
+ *(cache_grad_attn_weight+threadIdx.x)=0;
+ if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w)
+ {
+ ms_deform_attn_col2im_bilinear(
+ data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col,
+ top_grad, weight, grad_value_ptr,
+ cache_grad_sampling_loc+(threadIdx.x << 1), cache_grad_attn_weight+threadIdx.x);
+ }
+
+ __syncthreads();
+ if (tid == 0)
+ {
+ scalar_t _grad_w=cache_grad_sampling_loc[0], _grad_h=cache_grad_sampling_loc[1], _grad_a=cache_grad_attn_weight[0];
+ int sid=2;
+ for (unsigned int tid = 1; tid < blockDim.x; ++tid)
+ {
+ _grad_w += cache_grad_sampling_loc[sid];
+ _grad_h += cache_grad_sampling_loc[sid + 1];
+ _grad_a += cache_grad_attn_weight[tid];
+ sid += 2;
+ }
+
+
+ *grad_sampling_loc = _grad_w;
+ *(grad_sampling_loc + 1) = _grad_h;
+ *grad_attn_weight = _grad_a;
+ }
+ __syncthreads();
+
+ data_weight_ptr += 1;
+ data_loc_w_ptr += 2;
+ grad_attn_weight += grad_weight_stride;
+ grad_sampling_loc += grad_loc_stride;
+ }
+ }
+ }
+}
+
+template <typename scalar_t>
+__global__ void ms_deformable_col2im_gpu_kernel_shm_reduce_v2(const int n,
+ const scalar_t *grad_col,
+ const scalar_t *data_value,
+ const int64_t *data_spatial_shapes,
+ const int64_t *data_level_start_index,
+ const scalar_t *data_sampling_loc,
+ const scalar_t *data_attn_weight,
+ const int batch_size,
+ const int spatial_size,
+ const int num_heads,
+ const int channels,
+ const int num_levels,
+ const int num_query,
+ const int num_point,
+ scalar_t *grad_value,
+ scalar_t *grad_sampling_loc,
+ scalar_t *grad_attn_weight)
+{
+ CUDA_KERNEL_LOOP(index, n)
+ {
+ extern __shared__ int _s[];
+ scalar_t* cache_grad_sampling_loc = (scalar_t*)_s;
+ scalar_t* cache_grad_attn_weight = cache_grad_sampling_loc + 2 * blockDim.x;
+ unsigned int tid = threadIdx.x;
+ int _temp = index;
+ const int c_col = _temp % channels;
+ _temp /= channels;
+ const int sampling_index = _temp;
+ const int m_col = _temp % num_heads;
+ _temp /= num_heads;
+ const int q_col = _temp % num_query;
+ _temp /= num_query;
+ const int b_col = _temp;
+
+ const scalar_t top_grad = grad_col[index];
+
+ int data_weight_ptr = sampling_index * num_levels * num_point;
+ int data_loc_w_ptr = data_weight_ptr << 1;
+ const int grad_sampling_ptr = data_weight_ptr;
+ grad_sampling_loc += grad_sampling_ptr << 1;
+ grad_attn_weight += grad_sampling_ptr;
+ const int grad_weight_stride = 1;
+ const int grad_loc_stride = 2;
+ const int qid_stride = num_heads * channels;
+ const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride;
+
+ for (int l_col=0; l_col < num_levels; ++l_col)
+ {
+ const int level_start_id = data_level_start_index[l_col];
+ const int spatial_h_ptr = l_col << 1;
+ const int spatial_h = data_spatial_shapes[spatial_h_ptr];
+ const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1];
+ const int value_ptr_offset = data_value_ptr_init_offset + level_start_id * qid_stride;
+ const scalar_t *data_value_ptr = data_value + value_ptr_offset;
+ scalar_t *grad_value_ptr = grad_value + value_ptr_offset;
+
+ for (int p_col=0; p_col < num_point; ++p_col)
+ {
+ const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr];
+ const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1];
+ const scalar_t weight = data_attn_weight[data_weight_ptr];
+
+ const scalar_t h_im = loc_h * spatial_h - 0.5;
+ const scalar_t w_im = loc_w * spatial_w - 0.5;
+ *(cache_grad_sampling_loc+(threadIdx.x << 1)) = 0;
+ *(cache_grad_sampling_loc+((threadIdx.x << 1) + 1)) = 0;
+ *(cache_grad_attn_weight+threadIdx.x)=0;
+ if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w)
+ {
+ ms_deform_attn_col2im_bilinear(
+ data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col,
+ top_grad, weight, grad_value_ptr,
+ cache_grad_sampling_loc+(threadIdx.x << 1), cache_grad_attn_weight+threadIdx.x);
+ }
+
+ __syncthreads();
+
+ for (unsigned int s=blockDim.x/2, spre=blockDim.x; s>0; s>>=1, spre>>=1)
+ {
+ if (tid < s) {
+ const unsigned int xid1 = tid << 1;
+ const unsigned int xid2 = (tid + s) << 1;
+ cache_grad_attn_weight[tid] += cache_grad_attn_weight[tid + s];
+ cache_grad_sampling_loc[xid1] += cache_grad_sampling_loc[xid2];
+ cache_grad_sampling_loc[xid1 + 1] += cache_grad_sampling_loc[xid2 + 1];
+ if (tid + (s << 1) < spre)
+ {
+ cache_grad_attn_weight[tid] += cache_grad_attn_weight[tid + (s << 1)];
+ cache_grad_sampling_loc[xid1] += cache_grad_sampling_loc[xid2 + (s << 1)];
+ cache_grad_sampling_loc[xid1 + 1] += cache_grad_sampling_loc[xid2 + 1 + (s << 1)];
+ }
+ }
+ __syncthreads();
+ }
+
+ if (tid == 0)
+ {
+ *grad_sampling_loc = cache_grad_sampling_loc[0];
+ *(grad_sampling_loc + 1) = cache_grad_sampling_loc[1];
+ *grad_attn_weight = cache_grad_attn_weight[0];
+ }
+ __syncthreads();
+
+ data_weight_ptr += 1;
+ data_loc_w_ptr += 2;
+ grad_attn_weight += grad_weight_stride;
+ grad_sampling_loc += grad_loc_stride;
+ }
+ }
+ }
+}
+
+template <typename scalar_t>
+__global__ void ms_deformable_col2im_gpu_kernel_shm_reduce_v2_multi_blocks(const int n,
+ const scalar_t *grad_col,
+ const scalar_t *data_value,
+ const int64_t *data_spatial_shapes,
+ const int64_t *data_level_start_index,
+ const scalar_t *data_sampling_loc,
+ const scalar_t *data_attn_weight,
+ const int batch_size,
+ const int spatial_size,
+ const int num_heads,
+ const int channels,
+ const int num_levels,
+ const int num_query,
+ const int num_point,
+ scalar_t *grad_value,
+ scalar_t *grad_sampling_loc,
+ scalar_t *grad_attn_weight)
+{
+ CUDA_KERNEL_LOOP(index, n)
+ {
+ extern __shared__ int _s[];
+ scalar_t* cache_grad_sampling_loc = (scalar_t*)_s;
+ scalar_t* cache_grad_attn_weight = cache_grad_sampling_loc + 2 * blockDim.x;
+ unsigned int tid = threadIdx.x;
+ int _temp = index;
+ const int c_col = _temp % channels;
+ _temp /= channels;
+ const int sampling_index = _temp;
+ const int m_col = _temp % num_heads;
+ _temp /= num_heads;
+ const int q_col = _temp % num_query;
+ _temp /= num_query;
+ const int b_col = _temp;
+
+ const scalar_t top_grad = grad_col[index];
+
+ int data_weight_ptr = sampling_index * num_levels * num_point;
+ int data_loc_w_ptr = data_weight_ptr << 1;
+ const int grad_sampling_ptr = data_weight_ptr;
+ grad_sampling_loc += grad_sampling_ptr << 1;
+ grad_attn_weight += grad_sampling_ptr;
+ const int grad_weight_stride = 1;
+ const int grad_loc_stride = 2;
+ const int qid_stride = num_heads * channels;
+ const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride;
+
+ for (int l_col=0; l_col < num_levels; ++l_col)
+ {
+ const int level_start_id = data_level_start_index[l_col];
+ const int spatial_h_ptr = l_col << 1;
+ const int spatial_h = data_spatial_shapes[spatial_h_ptr];
+ const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1];
+ const int value_ptr_offset = data_value_ptr_init_offset + level_start_id * qid_stride;
+ const scalar_t *data_value_ptr = data_value + value_ptr_offset;
+ scalar_t *grad_value_ptr = grad_value + value_ptr_offset;
+
+ for (int p_col=0; p_col < num_point; ++p_col)
+ {
+ const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr];
+ const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1];
+ const scalar_t weight = data_attn_weight[data_weight_ptr];
+
+ const scalar_t h_im = loc_h * spatial_h - 0.5;
+ const scalar_t w_im = loc_w * spatial_w - 0.5;
+ *(cache_grad_sampling_loc+(threadIdx.x << 1)) = 0;
+ *(cache_grad_sampling_loc+((threadIdx.x << 1) + 1)) = 0;
+ *(cache_grad_attn_weight+threadIdx.x)=0;
+ if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w)
+ {
+ ms_deform_attn_col2im_bilinear(
+ data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col,
+ top_grad, weight, grad_value_ptr,
+ cache_grad_sampling_loc+(threadIdx.x << 1), cache_grad_attn_weight+threadIdx.x);
+ }
+
+ __syncthreads();
+
+ for (unsigned int s=blockDim.x/2, spre=blockDim.x; s>0; s>>=1, spre>>=1)
+ {
+ if (tid < s) {
+ const unsigned int xid1 = tid << 1;
+ const unsigned int xid2 = (tid + s) << 1;
+ cache_grad_attn_weight[tid] += cache_grad_attn_weight[tid + s];
+ cache_grad_sampling_loc[xid1] += cache_grad_sampling_loc[xid2];
+ cache_grad_sampling_loc[xid1 + 1] += cache_grad_sampling_loc[xid2 + 1];
+ if (tid + (s << 1) < spre)
+ {
+ cache_grad_attn_weight[tid] += cache_grad_attn_weight[tid + (s << 1)];
+ cache_grad_sampling_loc[xid1] += cache_grad_sampling_loc[xid2 + (s << 1)];
+ cache_grad_sampling_loc[xid1 + 1] += cache_grad_sampling_loc[xid2 + 1 + (s << 1)];
+ }
+ }
+ __syncthreads();
+ }
+
+ if (tid == 0)
+ {
+ atomicAdd(grad_sampling_loc, cache_grad_sampling_loc[0]);
+ atomicAdd(grad_sampling_loc + 1, cache_grad_sampling_loc[1]);
+ atomicAdd(grad_attn_weight, cache_grad_attn_weight[0]);
+ }
+ __syncthreads();
+
+ data_weight_ptr += 1;
+ data_loc_w_ptr += 2;
+ grad_attn_weight += grad_weight_stride;
+ grad_sampling_loc += grad_loc_stride;
+ }
+ }
+ }
+}
+
+
+template <typename scalar_t>
+__global__ void ms_deformable_col2im_gpu_kernel_gm(const int n,
+ const scalar_t *grad_col,
+ const scalar_t *data_value,
+ const int64_t *data_spatial_shapes,
+ const int64_t *data_level_start_index,
+ const scalar_t *data_sampling_loc,
+ const scalar_t *data_attn_weight,
+ const int batch_size,
+ const int spatial_size,
+ const int num_heads,
+ const int channels,
+ const int num_levels,
+ const int num_query,
+ const int num_point,
+ scalar_t *grad_value,
+ scalar_t *grad_sampling_loc,
+ scalar_t *grad_attn_weight)
+{
+ CUDA_KERNEL_LOOP(index, n)
+ {
+ int _temp = index;
+ const int c_col = _temp % channels;
+ _temp /= channels;
+ const int sampling_index = _temp;
+ const int m_col = _temp % num_heads;
+ _temp /= num_heads;
+ const int q_col = _temp % num_query;
+ _temp /= num_query;
+ const int b_col = _temp;
+
+ const scalar_t top_grad = grad_col[index];
+
+ int data_weight_ptr = sampling_index * num_levels * num_point;
+ int data_loc_w_ptr = data_weight_ptr << 1;
+ const int grad_sampling_ptr = data_weight_ptr;
+ grad_sampling_loc += grad_sampling_ptr << 1;
+ grad_attn_weight += grad_sampling_ptr;
+ const int grad_weight_stride = 1;
+ const int grad_loc_stride = 2;
+ const int qid_stride = num_heads * channels;
+ const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride;
+
+ for (int l_col=0; l_col < num_levels; ++l_col)
+ {
+ const int level_start_id = data_level_start_index[l_col];
+ const int spatial_h_ptr = l_col << 1;
+ const int spatial_h = data_spatial_shapes[spatial_h_ptr];
+ const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1];
+ const int value_ptr_offset = data_value_ptr_init_offset + level_start_id * qid_stride;
+ const scalar_t *data_value_ptr = data_value + value_ptr_offset;
+ scalar_t *grad_value_ptr = grad_value + value_ptr_offset;
+
+ for (int p_col=0; p_col < num_point; ++p_col)
+ {
+ const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr];
+ const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1];
+ const scalar_t weight = data_attn_weight[data_weight_ptr];
+
+ const scalar_t h_im = loc_h * spatial_h - 0.5;
+ const scalar_t w_im = loc_w * spatial_w - 0.5;
+ if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w)
+ {
+ ms_deform_attn_col2im_bilinear_gm(
+ data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col,
+ top_grad, weight, grad_value_ptr,
+ grad_sampling_loc, grad_attn_weight);
+ }
+ data_weight_ptr += 1;
+ data_loc_w_ptr += 2;
+ grad_attn_weight += grad_weight_stride;
+ grad_sampling_loc += grad_loc_stride;
+ }
+ }
+ }
+}
+
+
+template <typename scalar_t>
+void ms_deformable_im2col_cuda(cudaStream_t stream,
+ const scalar_t* data_value,
+ const int64_t* data_spatial_shapes,
+ const int64_t* data_level_start_index,
+ const scalar_t* data_sampling_loc,
+ const scalar_t* data_attn_weight,
+ const int batch_size,
+ const int spatial_size,
+ const int num_heads,
+ const int channels,
+ const int num_levels,
+ const int num_query,
+ const int num_point,
+ scalar_t* data_col)
+{
+ const int num_kernels = batch_size * num_query * num_heads * channels;
+ const int num_actual_kernels = batch_size * num_query * num_heads * channels;
+ const int num_threads = CUDA_NUM_THREADS;
+  ms_deformable_im2col_gpu_kernel<scalar_t>
+      <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads, 0, stream>>>(
+ num_kernels, data_value, data_spatial_shapes, data_level_start_index, data_sampling_loc, data_attn_weight,
+ batch_size, spatial_size, num_heads, channels, num_levels, num_query, num_point, data_col);
+
+ cudaError_t err = cudaGetLastError();
+ if (err != cudaSuccess)
+ {
+ printf("error in ms_deformable_im2col_cuda: %s\n", cudaGetErrorString(err));
+ }
+
+}
+
+template <typename scalar_t>
+void ms_deformable_col2im_cuda(cudaStream_t stream,
+ const scalar_t* grad_col,
+ const scalar_t* data_value,
+ const int64_t * data_spatial_shapes,
+ const int64_t * data_level_start_index,
+ const scalar_t * data_sampling_loc,
+ const scalar_t * data_attn_weight,
+ const int batch_size,
+ const int spatial_size,
+ const int num_heads,
+ const int channels,
+ const int num_levels,
+ const int num_query,
+ const int num_point,
+ scalar_t* grad_value,
+ scalar_t* grad_sampling_loc,
+ scalar_t* grad_attn_weight)
+{
+ const int num_threads = (channels > CUDA_NUM_THREADS)?CUDA_NUM_THREADS:channels;
+ const int num_kernels = batch_size * num_query * num_heads * channels;
+ const int num_actual_kernels = batch_size * num_query * num_heads * channels;
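+  // Kernel selection: power-of-two channel counts up to 1024 use a compile-time
+  // blockSize shared-memory reduction; other counts below 1024 use the dynamic
+  // shared-memory kernels; channels > 1024 fall back to a multi-block reduction
+  // (when divisible by 1024) or to plain global-memory atomics.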
+ if (channels > 1024)
+ {
+ if ((channels & 1023) == 0)
+ {
+      ms_deformable_col2im_gpu_kernel_shm_reduce_v2_multi_blocks<scalar_t>
+          <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads, num_threads*3*sizeof(scalar_t), stream>>>(
+ num_kernels,
+ grad_col,
+ data_value,
+ data_spatial_shapes,
+ data_level_start_index,
+ data_sampling_loc,
+ data_attn_weight,
+ batch_size,
+ spatial_size,
+ num_heads,
+ channels,
+ num_levels,
+ num_query,
+ num_point,
+ grad_value,
+ grad_sampling_loc,
+ grad_attn_weight);
+ }
+ else
+ {
+      ms_deformable_col2im_gpu_kernel_gm<scalar_t>
+          <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads, 0, stream>>>(
+ num_kernels,
+ grad_col,
+ data_value,
+ data_spatial_shapes,
+ data_level_start_index,
+ data_sampling_loc,
+ data_attn_weight,
+ batch_size,
+ spatial_size,
+ num_heads,
+ channels,
+ num_levels,
+ num_query,
+ num_point,
+ grad_value,
+ grad_sampling_loc,
+ grad_attn_weight);
+ }
+ }
+ else{
+ switch(channels)
+ {
+ case 1:
+        ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1<scalar_t, 1>
+        <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads, 0, stream>>>(
+ num_kernels,
+ grad_col,
+ data_value,
+ data_spatial_shapes,
+ data_level_start_index,
+ data_sampling_loc,
+ data_attn_weight,
+ batch_size,
+ spatial_size,
+ num_heads,
+ channels,
+ num_levels,
+ num_query,
+ num_point,
+ grad_value,
+ grad_sampling_loc,
+ grad_attn_weight);
+ break;
+ case 2:
+        ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1<scalar_t, 2>
+        <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads, 0, stream>>>(
+ num_kernels,
+ grad_col,
+ data_value,
+ data_spatial_shapes,
+ data_level_start_index,
+ data_sampling_loc,
+ data_attn_weight,
+ batch_size,
+ spatial_size,
+ num_heads,
+ channels,
+ num_levels,
+ num_query,
+ num_point,
+ grad_value,
+ grad_sampling_loc,
+ grad_attn_weight);
+ break;
+ case 4:
+        ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1<scalar_t, 4>
+        <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads, 0, stream>>>(
+ num_kernels,
+ grad_col,
+ data_value,
+ data_spatial_shapes,
+ data_level_start_index,
+ data_sampling_loc,
+ data_attn_weight,
+ batch_size,
+ spatial_size,
+ num_heads,
+ channels,
+ num_levels,
+ num_query,
+ num_point,
+ grad_value,
+ grad_sampling_loc,
+ grad_attn_weight);
+ break;
+ case 8:
+        ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1<scalar_t, 8>
+        <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads, 0, stream>>>(
+ num_kernels,
+ grad_col,
+ data_value,
+ data_spatial_shapes,
+ data_level_start_index,
+ data_sampling_loc,
+ data_attn_weight,
+ batch_size,
+ spatial_size,
+ num_heads,
+ channels,
+ num_levels,
+ num_query,
+ num_point,
+ grad_value,
+ grad_sampling_loc,
+ grad_attn_weight);
+ break;
+ case 16:
+        ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1<scalar_t, 16>
+        <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads, 0, stream>>>(
+ num_kernels,
+ grad_col,
+ data_value,
+ data_spatial_shapes,
+ data_level_start_index,
+ data_sampling_loc,
+ data_attn_weight,
+ batch_size,
+ spatial_size,
+ num_heads,
+ channels,
+ num_levels,
+ num_query,
+ num_point,
+ grad_value,
+ grad_sampling_loc,
+ grad_attn_weight);
+ break;
+ case 32:
+        ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1<scalar_t, 32>
+        <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads, 0, stream>>>(
+ num_kernels,
+ grad_col,
+ data_value,
+ data_spatial_shapes,
+ data_level_start_index,
+ data_sampling_loc,
+ data_attn_weight,
+ batch_size,
+ spatial_size,
+ num_heads,
+ channels,
+ num_levels,
+ num_query,
+ num_point,
+ grad_value,
+ grad_sampling_loc,
+ grad_attn_weight);
+ break;
+ case 64:
+        ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2<scalar_t, 64>
+        <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads, 0, stream>>>(
+ num_kernels,
+ grad_col,
+ data_value,
+ data_spatial_shapes,
+ data_level_start_index,
+ data_sampling_loc,
+ data_attn_weight,
+ batch_size,
+ spatial_size,
+ num_heads,
+ channels,
+ num_levels,
+ num_query,
+ num_point,
+ grad_value,
+ grad_sampling_loc,
+ grad_attn_weight);
+ break;
+ case 128:
+        ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2<scalar_t, 128>
+        <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads, 0, stream>>>(
+ num_kernels,
+ grad_col,
+ data_value,
+ data_spatial_shapes,
+ data_level_start_index,
+ data_sampling_loc,
+ data_attn_weight,
+ batch_size,
+ spatial_size,
+ num_heads,
+ channels,
+ num_levels,
+ num_query,
+ num_point,
+ grad_value,
+ grad_sampling_loc,
+ grad_attn_weight);
+ break;
+ case 256:
+        ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2<scalar_t, 256>
+        <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads, 0, stream>>>(
+ num_kernels,
+ grad_col,
+ data_value,
+ data_spatial_shapes,
+ data_level_start_index,
+ data_sampling_loc,
+ data_attn_weight,
+ batch_size,
+ spatial_size,
+ num_heads,
+ channels,
+ num_levels,
+ num_query,
+ num_point,
+ grad_value,
+ grad_sampling_loc,
+ grad_attn_weight);
+ break;
+ case 512:
+        ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2<scalar_t, 512>
+        <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads, 0, stream>>>(
+ num_kernels,
+ grad_col,
+ data_value,
+ data_spatial_shapes,
+ data_level_start_index,
+ data_sampling_loc,
+ data_attn_weight,
+ batch_size,
+ spatial_size,
+ num_heads,
+ channels,
+ num_levels,
+ num_query,
+ num_point,
+ grad_value,
+ grad_sampling_loc,
+ grad_attn_weight);
+ break;
+ case 1024:
+        ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2<scalar_t, 1024>
+        <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads, 0, stream>>>(
+ num_kernels,
+ grad_col,
+ data_value,
+ data_spatial_shapes,
+ data_level_start_index,
+ data_sampling_loc,
+ data_attn_weight,
+ batch_size,
+ spatial_size,
+ num_heads,
+ channels,
+ num_levels,
+ num_query,
+ num_point,
+ grad_value,
+ grad_sampling_loc,
+ grad_attn_weight);
+ break;
+ default:
+ if (channels < 64)
+ {
+          ms_deformable_col2im_gpu_kernel_shm_reduce_v1<scalar_t>
+          <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads, num_threads*3*sizeof(scalar_t), stream>>>(
+ num_kernels,
+ grad_col,
+ data_value,
+ data_spatial_shapes,
+ data_level_start_index,
+ data_sampling_loc,
+ data_attn_weight,
+ batch_size,
+ spatial_size,
+ num_heads,
+ channels,
+ num_levels,
+ num_query,
+ num_point,
+ grad_value,
+ grad_sampling_loc,
+ grad_attn_weight);
+ }
+ else
+ {
+          ms_deformable_col2im_gpu_kernel_shm_reduce_v2<scalar_t>
+          <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads, num_threads*3*sizeof(scalar_t), stream>>>(
+ num_kernels,
+ grad_col,
+ data_value,
+ data_spatial_shapes,
+ data_level_start_index,
+ data_sampling_loc,
+ data_attn_weight,
+ batch_size,
+ spatial_size,
+ num_heads,
+ channels,
+ num_levels,
+ num_query,
+ num_point,
+ grad_value,
+ grad_sampling_loc,
+ grad_attn_weight);
+ }
+ }
+ }
+ cudaError_t err = cudaGetLastError();
+ if (err != cudaSuccess)
+ {
+ printf("error in ms_deformable_col2im_cuda: %s\n", cudaGetErrorString(err));
+ }
+
+}
diff --git a/projects/PCSegSAM2/grounding_dino/groundingdino/models/GroundingDINO/csrc/cuda_version.cu b/projects/PCSegSAM2/grounding_dino/groundingdino/models/GroundingDINO/csrc/cuda_version.cu
new file mode 100644
index 00000000..64569e34
--- /dev/null
+++ b/projects/PCSegSAM2/grounding_dino/groundingdino/models/GroundingDINO/csrc/cuda_version.cu
@@ -0,0 +1,7 @@
+#include <cuda_runtime_api.h>
+
+namespace groundingdino {
+int get_cudart_version() {
+ return CUDART_VERSION;
+}
+} // namespace groundingdino
diff --git a/projects/PCSegSAM2/grounding_dino/groundingdino/models/GroundingDINO/csrc/vision.cpp b/projects/PCSegSAM2/grounding_dino/groundingdino/models/GroundingDINO/csrc/vision.cpp
new file mode 100644
index 00000000..db58983a
--- /dev/null
+++ b/projects/PCSegSAM2/grounding_dino/groundingdino/models/GroundingDINO/csrc/vision.cpp
@@ -0,0 +1,58 @@
+// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+
+#include "MsDeformAttn/ms_deform_attn.h"
+
+namespace groundingdino {
+
+#ifdef WITH_CUDA
+extern int get_cudart_version();
+#endif
+
+std::string get_cuda_version() {
+#ifdef WITH_CUDA
+ std::ostringstream oss;
+
+ // copied from
+ // https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/cuda/detail/CUDAHooks.cpp#L231
+ auto printCudaStyleVersion = [&](int v) {
+ oss << (v / 1000) << "." << (v / 10 % 100);
+ if (v % 10 != 0) {
+ oss << "." << (v % 10);
+ }
+ };
+ printCudaStyleVersion(get_cudart_version());
+ return oss.str();
+#else
+ return std::string("not available");
+#endif
+}
+
+// similar to
+// https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/Version.cpp
+std::string get_compiler_version() {
+ std::ostringstream ss;
+#if defined(__GNUC__)
+#ifndef __clang__
+ { ss << "GCC " << __GNUC__ << "." << __GNUC_MINOR__; }
+#endif
+#endif
+
+#if defined(__clang_major__)
+ {
+ ss << "clang " << __clang_major__ << "." << __clang_minor__ << "."
+ << __clang_patchlevel__;
+ }
+#endif
+
+#if defined(_MSC_VER)
+ { ss << "MSVC " << _MSC_FULL_VER; }
+#endif
+ return ss.str();
+}
+
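+// Python bindings: forward/backward dispatch to the implementations declared in
+// ms_deform_attn.h.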
+PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
+ m.def("ms_deform_attn_forward", &ms_deform_attn_forward, "ms_deform_attn_forward");
+ m.def("ms_deform_attn_backward", &ms_deform_attn_backward, "ms_deform_attn_backward");
+}
+
+} // namespace groundingdino
diff --git a/projects/PCSegSAM2/grounding_dino/groundingdino/models/GroundingDINO/fuse_modules.py b/projects/PCSegSAM2/grounding_dino/groundingdino/models/GroundingDINO/fuse_modules.py
new file mode 100644
index 00000000..758c225c
--- /dev/null
+++ b/projects/PCSegSAM2/grounding_dino/groundingdino/models/GroundingDINO/fuse_modules.py
@@ -0,0 +1,291 @@
+# ------------------------------------------------------------------------
+# Grounding DINO
+# url: https://github.com/IDEA-Research/GroundingDINO
+# Copyright (c) 2023 IDEA. All Rights Reserved.
+# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
+# ------------------------------------------------------------------------
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from timm.models.layers import DropPath
+
+
+class FeatureResizer(nn.Module):
+ """
+    This class takes as input a set of embeddings of dimension C1 and outputs a set of
+    embeddings of dimension C2, after a linear transformation, dropout and layer normalization (LN).
+ """
+
+ def __init__(self, input_feat_size, output_feat_size, dropout, do_ln=True):
+ super().__init__()
+ self.do_ln = do_ln
+ # Object feature encoding
+ self.fc = nn.Linear(input_feat_size, output_feat_size, bias=True)
+ self.layer_norm = nn.LayerNorm(output_feat_size, eps=1e-12)
+ self.dropout = nn.Dropout(dropout)
+
+ def forward(self, encoder_features):
+ x = self.fc(encoder_features)
+ if self.do_ln:
+ x = self.layer_norm(x)
+ output = self.dropout(x)
+ return output
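+
+# Hypothetical usage sketch (shapes illustrative, not from the original source):
+#   resizer = FeatureResizer(input_feat_size=768, output_feat_size=256, dropout=0.1)
+#   out = resizer(torch.randn(2, 10, 768))  # -> (2, 10, 256)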
+
+
+def l1norm(X, dim, eps=1e-8):
+ """L1-normalize columns of X"""
+ norm = torch.abs(X).sum(dim=dim, keepdim=True) + eps
+ X = torch.div(X, norm)
+ return X
+
+
+def l2norm(X, dim, eps=1e-8):
+ """L2-normalize columns of X"""
+ norm = torch.pow(X, 2).sum(dim=dim, keepdim=True).sqrt() + eps
+ X = torch.div(X, norm)
+ return X
+
+
+def func_attention(query, context, smooth=1, raw_feature_norm="softmax", eps=1e-8):
+ """
+ query: (n_context, queryL, d)
+ context: (n_context, sourceL, d)
+ """
+ batch_size_q, queryL = query.size(0), query.size(1)
+ batch_size, sourceL = context.size(0), context.size(1)
+
+ # Get attention
+ # --> (batch, d, queryL)
+ queryT = torch.transpose(query, 1, 2)
+
+ # (batch, sourceL, d)(batch, d, queryL)
+ # --> (batch, sourceL, queryL)
+ attn = torch.bmm(context, queryT)
+ if raw_feature_norm == "softmax":
+ # --> (batch*sourceL, queryL)
+ attn = attn.view(batch_size * sourceL, queryL)
+ attn = nn.Softmax()(attn)
+ # --> (batch, sourceL, queryL)
+ attn = attn.view(batch_size, sourceL, queryL)
+ elif raw_feature_norm == "l2norm":
+ attn = l2norm(attn, 2)
+ elif raw_feature_norm == "clipped_l2norm":
+ attn = nn.LeakyReLU(0.1)(attn)
+ attn = l2norm(attn, 2)
+ else:
+ raise ValueError("unknown first norm type:", raw_feature_norm)
+ # --> (batch, queryL, sourceL)
+ attn = torch.transpose(attn, 1, 2).contiguous()
+ # --> (batch*queryL, sourceL)
+ attn = attn.view(batch_size * queryL, sourceL)
+ attn = nn.Softmax()(attn * smooth)
+ # --> (batch, queryL, sourceL)
+ attn = attn.view(batch_size, queryL, sourceL)
+ # --> (batch, sourceL, queryL)
+ attnT = torch.transpose(attn, 1, 2).contiguous()
+
+ # --> (batch, d, sourceL)
+ contextT = torch.transpose(context, 1, 2)
+ # (batch x d x sourceL)(batch x sourceL x queryL)
+ # --> (batch, d, queryL)
+ weightedContext = torch.bmm(contextT, attnT)
+ # --> (batch, queryL, d)
+ weightedContext = torch.transpose(weightedContext, 1, 2)
+
+ return weightedContext, attnT
+
+
+class BiMultiHeadAttention(nn.Module):
+ def __init__(self, v_dim, l_dim, embed_dim, num_heads, dropout=0.1, cfg=None):
+ super(BiMultiHeadAttention, self).__init__()
+
+ self.embed_dim = embed_dim
+ self.num_heads = num_heads
+ self.head_dim = embed_dim // num_heads
+ self.v_dim = v_dim
+ self.l_dim = l_dim
+
+ assert (
+ self.head_dim * self.num_heads == self.embed_dim
+ ), f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads})."
+ self.scale = self.head_dim ** (-0.5)
+ self.dropout = dropout
+
+ self.v_proj = nn.Linear(self.v_dim, self.embed_dim)
+ self.l_proj = nn.Linear(self.l_dim, self.embed_dim)
+ self.values_v_proj = nn.Linear(self.v_dim, self.embed_dim)
+ self.values_l_proj = nn.Linear(self.l_dim, self.embed_dim)
+
+ self.out_v_proj = nn.Linear(self.embed_dim, self.v_dim)
+ self.out_l_proj = nn.Linear(self.embed_dim, self.l_dim)
+
+ self.stable_softmax_2d = True
+ self.clamp_min_for_underflow = True
+ self.clamp_max_for_overflow = True
+
+ self._reset_parameters()
+
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
+
+ def _reset_parameters(self):
+ nn.init.xavier_uniform_(self.v_proj.weight)
+ self.v_proj.bias.data.fill_(0)
+ nn.init.xavier_uniform_(self.l_proj.weight)
+ self.l_proj.bias.data.fill_(0)
+ nn.init.xavier_uniform_(self.values_v_proj.weight)
+ self.values_v_proj.bias.data.fill_(0)
+ nn.init.xavier_uniform_(self.values_l_proj.weight)
+ self.values_l_proj.bias.data.fill_(0)
+ nn.init.xavier_uniform_(self.out_v_proj.weight)
+ self.out_v_proj.bias.data.fill_(0)
+ nn.init.xavier_uniform_(self.out_l_proj.weight)
+ self.out_l_proj.bias.data.fill_(0)
+
+ def forward(self, v, l, attention_mask_v=None, attention_mask_l=None):
+ """_summary_
+
+ Args:
+ v (_type_): bs, n_img, dim
+ l (_type_): bs, n_text, dim
+ attention_mask_v (_type_, optional): _description_. bs, n_img
+ attention_mask_l (_type_, optional): _description_. bs, n_text
+
+ Returns:
+ _type_: _description_
+ """
+ # if os.environ.get('IPDB_SHILONG_DEBUG', None) == 'INFO':
+ # import ipdb; ipdb.set_trace()
+ bsz, tgt_len, _ = v.size()
+
+ query_states = self.v_proj(v) * self.scale
+ key_states = self._shape(self.l_proj(l), -1, bsz)
+ value_v_states = self._shape(self.values_v_proj(v), -1, bsz)
+ value_l_states = self._shape(self.values_l_proj(l), -1, bsz)
+
+ proj_shape = (bsz * self.num_heads, -1, self.head_dim)
+ query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
+ key_states = key_states.view(*proj_shape)
+ value_v_states = value_v_states.view(*proj_shape)
+ value_l_states = value_l_states.view(*proj_shape)
+
+ src_len = key_states.size(1)
+ attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) # bs*nhead, nimg, ntxt
+
+ if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
+ raise ValueError(
+ f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {attn_weights.size()}"
+ )
+
+ if self.stable_softmax_2d:
+ attn_weights = attn_weights - attn_weights.max()
+
+ if self.clamp_min_for_underflow:
+ attn_weights = torch.clamp(
+ attn_weights, min=-50000
+ ) # Do not increase -50000, data type half has quite limited range
+ if self.clamp_max_for_overflow:
+ attn_weights = torch.clamp(
+ attn_weights, max=50000
+ ) # Do not increase 50000, data type half has quite limited range
+
+ attn_weights_T = attn_weights.transpose(1, 2)
+ attn_weights_l = attn_weights_T - torch.max(attn_weights_T, dim=-1, keepdim=True)[0]
+ if self.clamp_min_for_underflow:
+ attn_weights_l = torch.clamp(
+ attn_weights_l, min=-50000
+ ) # Do not increase -50000, data type half has quite limited range
+ if self.clamp_max_for_overflow:
+ attn_weights_l = torch.clamp(
+ attn_weights_l, max=50000
+ ) # Do not increase 50000, data type half has quite limited range
+
+        # mask vision for language
+ if attention_mask_v is not None:
+ attention_mask_v = attention_mask_v[:, None, None, :].repeat(1, self.num_heads, 1, 1).flatten(0, 1)
+ attn_weights_l.masked_fill_(attention_mask_v, float("-inf"))
+
+ attn_weights_l = attn_weights_l.softmax(dim=-1)
+
+ # mask language for vision
+ if attention_mask_l is not None:
+ attention_mask_l = attention_mask_l[:, None, None, :].repeat(1, self.num_heads, 1, 1).flatten(0, 1)
+ attn_weights.masked_fill_(attention_mask_l, float("-inf"))
+ attn_weights_v = attn_weights.softmax(dim=-1)
+
+ attn_probs_v = F.dropout(attn_weights_v, p=self.dropout, training=self.training)
+ attn_probs_l = F.dropout(attn_weights_l, p=self.dropout, training=self.training)
+
+ attn_output_v = torch.bmm(attn_probs_v, value_l_states)
+ attn_output_l = torch.bmm(attn_probs_l, value_v_states)
+
+ if attn_output_v.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
+ raise ValueError(
+ f"`attn_output_v` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is {attn_output_v.size()}"
+ )
+
+ if attn_output_l.size() != (bsz * self.num_heads, src_len, self.head_dim):
+ raise ValueError(
+ f"`attn_output_l` should be of size {(bsz, self.num_heads, src_len, self.head_dim)}, but is {attn_output_l.size()}"
+ )
+
+ attn_output_v = attn_output_v.view(bsz, self.num_heads, tgt_len, self.head_dim)
+ attn_output_v = attn_output_v.transpose(1, 2)
+ attn_output_v = attn_output_v.reshape(bsz, tgt_len, self.embed_dim)
+
+ attn_output_l = attn_output_l.view(bsz, self.num_heads, src_len, self.head_dim)
+ attn_output_l = attn_output_l.transpose(1, 2)
+ attn_output_l = attn_output_l.reshape(bsz, src_len, self.embed_dim)
+
+ attn_output_v = self.out_v_proj(attn_output_v)
+ attn_output_l = self.out_l_proj(attn_output_l)
+
+ return attn_output_v, attn_output_l
+
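+# Hypothetical shape check (illustrative only): with v of shape (2, 100, 256) and
+# l of shape (2, 16, 256), BiMultiHeadAttention(v_dim=256, l_dim=256, embed_dim=512,
+# num_heads=8) returns updated tensors with the same shapes as v and l.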
+
+# Bi-Direction MHA (text->image, image->text)
+class BiAttentionBlock(nn.Module):
+ def __init__(
+ self,
+ v_dim,
+ l_dim,
+ embed_dim,
+ num_heads,
+ dropout=0.1,
+ drop_path=0.0,
+ init_values=1e-4,
+ cfg=None,
+ ):
+ """
+ Inputs:
+ embed_dim - Dimensionality of input and attention feature vectors
+ hidden_dim - Dimensionality of hidden layer in feed-forward network
+ (usually 2-4x larger than embed_dim)
+ num_heads - Number of heads to use in the Multi-Head Attention block
+ dropout - Amount of dropout to apply in the feed-forward network
+ """
+ super(BiAttentionBlock, self).__init__()
+
+ # pre layer norm
+ self.layer_norm_v = nn.LayerNorm(v_dim)
+ self.layer_norm_l = nn.LayerNorm(l_dim)
+ self.attn = BiMultiHeadAttention(
+ v_dim=v_dim, l_dim=l_dim, embed_dim=embed_dim, num_heads=num_heads, dropout=dropout
+ )
+
+ # add layer scale for training stability
+ self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
+ self.gamma_v = nn.Parameter(init_values * torch.ones((v_dim)), requires_grad=True)
+ self.gamma_l = nn.Parameter(init_values * torch.ones((l_dim)), requires_grad=True)
+
+ def forward(self, v, l, attention_mask_v=None, attention_mask_l=None):
+ v = self.layer_norm_v(v)
+ l = self.layer_norm_l(l)
+ delta_v, delta_l = self.attn(v, l, attention_mask_v=attention_mask_v, attention_mask_l=attention_mask_l)
+ # v, l = v + delta_v, l + delta_l
+ v = v + self.drop_path(self.gamma_v * delta_v)
+ l = l + self.drop_path(self.gamma_l * delta_l)
+ return v, l
+
+ # def forward(self, v:List[torch.Tensor], l, attention_mask_v=None, attention_mask_l=None)
diff --git a/projects/PCSegSAM2/grounding_dino/groundingdino/models/GroundingDINO/groundingdino.py b/projects/PCSegSAM2/grounding_dino/groundingdino/models/GroundingDINO/groundingdino.py
new file mode 100644
index 00000000..5c431967
--- /dev/null
+++ b/projects/PCSegSAM2/grounding_dino/groundingdino/models/GroundingDINO/groundingdino.py
@@ -0,0 +1,392 @@
+# ------------------------------------------------------------------------
+# Grounding DINO
+# url: https://github.com/IDEA-Research/GroundingDINO
+# Copyright (c) 2023 IDEA. All Rights Reserved.
+# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
+# ------------------------------------------------------------------------
+# Conditional DETR model and criterion classes.
+# Copyright (c) 2021 Microsoft. All Rights Reserved.
+# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
+# ------------------------------------------------------------------------
+# Modified from DETR (https://github.com/facebookresearch/detr)
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
+# ------------------------------------------------------------------------
+# Modified from Deformable DETR (https://github.com/fundamentalvision/Deformable-DETR)
+# Copyright (c) 2020 SenseTime. All Rights Reserved.
+# ------------------------------------------------------------------------
+import copy
+from typing import List
+
+import torch
+import torch.nn.functional as F
+from groundingdino.util import box_ops, get_tokenlizer
+from groundingdino.util.misc import (
+ NestedTensor,
+ accuracy,
+ get_world_size,
+ interpolate,
+ inverse_sigmoid,
+ is_dist_avail_and_initialized,
+ nested_tensor_from_tensor_list,
+)
+from groundingdino.util.utils import get_phrases_from_posmap
+from groundingdino.util.visualizer import COCOVisualizer
+from groundingdino.util.vl_utils import create_positive_map_from_span
+from torch import nn
+from torchvision.ops.boxes import nms
+from transformers import AutoTokenizer, BertModel, BertTokenizer, RobertaModel, RobertaTokenizerFast
+
+from ..registry import MODULE_BUILD_FUNCS
+from .backbone import build_backbone
+from .bertwarper import (
+ BertModelWarper,
+ generate_masks_with_special_tokens,
+ generate_masks_with_special_tokens_and_transfer_map,
+)
+from .transformer import build_transformer
+from .utils import MLP, ContrastiveEmbed, sigmoid_focal_loss
+
+
+class GroundingDINO(nn.Module):
+ """This is the Cross-Attention Detector module that performs object detection"""
+
+ def __init__(
+ self,
+ backbone,
+ transformer,
+ num_queries,
+ aux_loss=False,
+ iter_update=False,
+ query_dim=2,
+ num_feature_levels=1,
+ nheads=8,
+ # two stage
+ two_stage_type="no", # ['no', 'standard']
+ dec_pred_bbox_embed_share=True,
+ two_stage_class_embed_share=True,
+ two_stage_bbox_embed_share=True,
+ num_patterns=0,
+ dn_number=100,
+ dn_box_noise_scale=0.4,
+ dn_label_noise_ratio=0.5,
+ dn_labelbook_size=100,
+ text_encoder_type="bert-base-uncased",
+ sub_sentence_present=True,
+ max_text_len=256,
+ ):
+ """Initializes the model.
+ Parameters:
+ backbone: torch module of the backbone to be used. See backbone.py
+ transformer: torch module of the transformer architecture. See transformer.py
+            num_queries: number of object queries, i.e., detection slots. This is the maximal number of objects
+ Conditional DETR can detect in a single image. For COCO, we recommend 100 queries.
+ aux_loss: True if auxiliary decoding losses (loss at each decoder layer) are to be used.
+ """
+ super().__init__()
+ self.num_queries = num_queries
+ self.transformer = transformer
+ self.hidden_dim = hidden_dim = transformer.d_model
+ self.num_feature_levels = num_feature_levels
+ self.nheads = nheads
+ self.max_text_len = 256
+ self.sub_sentence_present = sub_sentence_present
+
+ # setting query dim
+ self.query_dim = query_dim
+ assert query_dim == 4
+
+ # for dn training
+ self.num_patterns = num_patterns
+ self.dn_number = dn_number
+ self.dn_box_noise_scale = dn_box_noise_scale
+ self.dn_label_noise_ratio = dn_label_noise_ratio
+ self.dn_labelbook_size = dn_labelbook_size
+
+ # bert
+ self.tokenizer = get_tokenlizer.get_tokenlizer(text_encoder_type)
+ self.bert = get_tokenlizer.get_pretrained_language_model(text_encoder_type)
+ self.bert.pooler.dense.weight.requires_grad_(False)
+ self.bert.pooler.dense.bias.requires_grad_(False)
+ self.bert = BertModelWarper(bert_model=self.bert)
+
+ self.feat_map = nn.Linear(self.bert.config.hidden_size, self.hidden_dim, bias=True)
+ nn.init.constant_(self.feat_map.bias.data, 0)
+ nn.init.xavier_uniform_(self.feat_map.weight.data)
+ # freeze
+
+ # special tokens
+ self.specical_tokens = self.tokenizer.convert_tokens_to_ids(["[CLS]", "[SEP]", ".", "?"])
+
+ # prepare input projection layers
+ if num_feature_levels > 1:
+ num_backbone_outs = len(backbone.num_channels)
+ input_proj_list = []
+ for _ in range(num_backbone_outs):
+ in_channels = backbone.num_channels[_]
+ input_proj_list.append(
+ nn.Sequential(
+ nn.Conv2d(in_channels, hidden_dim, kernel_size=1),
+ nn.GroupNorm(32, hidden_dim),
+ )
+ )
+ for _ in range(num_feature_levels - num_backbone_outs):
+ input_proj_list.append(
+ nn.Sequential(
+ nn.Conv2d(in_channels, hidden_dim, kernel_size=3, stride=2, padding=1),
+ nn.GroupNorm(32, hidden_dim),
+ )
+ )
+ in_channels = hidden_dim
+ self.input_proj = nn.ModuleList(input_proj_list)
+ else:
+ assert two_stage_type == "no", "two_stage_type should be no if num_feature_levels=1 !!!"
+ self.input_proj = nn.ModuleList(
+ [
+ nn.Sequential(
+ nn.Conv2d(backbone.num_channels[-1], hidden_dim, kernel_size=1),
+ nn.GroupNorm(32, hidden_dim),
+ )
+ ]
+ )
+
+ self.backbone = backbone
+ self.aux_loss = aux_loss
+ self.box_pred_damping = box_pred_damping = None
+
+ self.iter_update = iter_update
+ assert iter_update, "Why not iter_update?"
+
+ # prepare pred layers
+ self.dec_pred_bbox_embed_share = dec_pred_bbox_embed_share
+ # prepare class & box embed
+ _class_embed = ContrastiveEmbed()
+
+ _bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3)
+ nn.init.constant_(_bbox_embed.layers[-1].weight.data, 0)
+ nn.init.constant_(_bbox_embed.layers[-1].bias.data, 0)
+
+ if dec_pred_bbox_embed_share:
+ box_embed_layerlist = [_bbox_embed for i in range(transformer.num_decoder_layers)]
+ else:
+ box_embed_layerlist = [copy.deepcopy(_bbox_embed) for i in range(transformer.num_decoder_layers)]
+ class_embed_layerlist = [_class_embed for i in range(transformer.num_decoder_layers)]
+ self.bbox_embed = nn.ModuleList(box_embed_layerlist)
+ self.class_embed = nn.ModuleList(class_embed_layerlist)
+ self.transformer.decoder.bbox_embed = self.bbox_embed
+ self.transformer.decoder.class_embed = self.class_embed
+
+ # two stage
+ self.two_stage_type = two_stage_type
+ assert two_stage_type in ["no", "standard"], "unknown param {} of two_stage_type".format(two_stage_type)
+ if two_stage_type != "no":
+ if two_stage_bbox_embed_share:
+ assert dec_pred_bbox_embed_share
+ self.transformer.enc_out_bbox_embed = _bbox_embed
+ else:
+ self.transformer.enc_out_bbox_embed = copy.deepcopy(_bbox_embed)
+
+ if two_stage_class_embed_share:
+ assert dec_pred_bbox_embed_share
+ self.transformer.enc_out_class_embed = _class_embed
+ else:
+ self.transformer.enc_out_class_embed = copy.deepcopy(_class_embed)
+
+ self.refpoint_embed = None
+
+ self._reset_parameters()
+
+ def _reset_parameters(self):
+ # init input_proj
+ for proj in self.input_proj:
+ nn.init.xavier_uniform_(proj[0].weight, gain=1)
+ nn.init.constant_(proj[0].bias, 0)
+
+ def set_image_tensor(self, samples: NestedTensor):
+ if isinstance(samples, (list, torch.Tensor)):
+ samples = nested_tensor_from_tensor_list(samples)
+ self.features, self.poss = self.backbone(samples)
+
+ def unset_image_tensor(self):
+ if hasattr(self, "features"):
+ del self.features
+ if hasattr(self, "poss"):
+ del self.poss
+
+ def set_image_features(self, features, poss):
+ self.features = features
+ self.poss = poss
+
+ def init_ref_points(self, use_num_queries):
+ self.refpoint_embed = nn.Embedding(use_num_queries, self.query_dim)
+
+ def forward(self, samples: NestedTensor, targets: List = None, **kw):
+ """The forward expects a NestedTensor, which consists of:
+ - samples.tensor: batched images, of shape [batch_size x 3 x H x W]
+ - samples.mask: a binary mask of shape [batch_size x H x W], containing 1 on padded pixels
+
+ It returns a dict with the following elements:
+ - "pred_logits": the classification logits (including no-object) for all queries.
+ Shape= [batch_size x num_queries x num_classes]
+ - "pred_boxes": The normalized boxes coordinates for all queries, represented as
+ (center_x, center_y, width, height). These values are normalized in [0, 1],
+ relative to the size of each individual image (disregarding possible padding).
+ See PostProcess for information on how to retrieve the unnormalized bounding box.
+ - "aux_outputs": Optional, only returned when auxilary losses are activated. It is a list of
+ dictionnaries containing the two above keys for each decoder layer.
+ """
+ if targets is None:
+ captions = kw["captions"]
+ else:
+ captions = [t["caption"] for t in targets]
+
+ # encoder texts
+ tokenized = self.tokenizer(captions, padding="longest", return_tensors="pt").to(samples.device)
+ (
+ text_self_attention_masks,
+ position_ids,
+ cate_to_token_mask_list,
+        ) = generate_masks_with_special_tokens_and_transfer_map(tokenized, self.special_tokens, self.tokenizer)
+
+ if text_self_attention_masks.shape[1] > self.max_text_len:
+ text_self_attention_masks = text_self_attention_masks[:, : self.max_text_len, : self.max_text_len]
+ position_ids = position_ids[:, : self.max_text_len]
+ tokenized["input_ids"] = tokenized["input_ids"][:, : self.max_text_len]
+ tokenized["attention_mask"] = tokenized["attention_mask"][:, : self.max_text_len]
+ tokenized["token_type_ids"] = tokenized["token_type_ids"][:, : self.max_text_len]
+
+ # extract text embeddings
+ if self.sub_sentence_present:
+ tokenized_for_encoder = {k: v for k, v in tokenized.items() if k != "attention_mask"}
+ tokenized_for_encoder["attention_mask"] = text_self_attention_masks
+ tokenized_for_encoder["position_ids"] = position_ids
+ else:
+ tokenized_for_encoder = tokenized
+
+ bert_output = self.bert(**tokenized_for_encoder) # bs, 195, 768
+
+ encoded_text = self.feat_map(bert_output["last_hidden_state"]) # bs, 195, d_model
+ text_token_mask = tokenized.attention_mask.bool() # bs, 195
+ # text_token_mask: True for nomask, False for mask
+ # text_self_attention_masks: True for nomask, False for mask
+
+ if encoded_text.shape[1] > self.max_text_len:
+ encoded_text = encoded_text[:, : self.max_text_len, :]
+ text_token_mask = text_token_mask[:, : self.max_text_len]
+ position_ids = position_ids[:, : self.max_text_len]
+ text_self_attention_masks = text_self_attention_masks[:, : self.max_text_len, : self.max_text_len]
+
+ text_dict = {
+ "encoded_text": encoded_text, # bs, 195, d_model
+ "text_token_mask": text_token_mask, # bs, 195
+ "position_ids": position_ids, # bs, 195
+ "text_self_attention_masks": text_self_attention_masks, # bs, 195,195
+ }
+
+ if isinstance(samples, (list, torch.Tensor)):
+ samples = nested_tensor_from_tensor_list(samples)
+ if not hasattr(self, "features") or not hasattr(self, "poss"):
+ self.set_image_tensor(samples)
+
+ srcs = []
+ masks = []
+ for l, feat in enumerate(self.features):
+ src, mask = feat.decompose()
+ srcs.append(self.input_proj[l](src))
+ masks.append(mask)
+ assert mask is not None
+ if self.num_feature_levels > len(srcs):
+ _len_srcs = len(srcs)
+ for l in range(_len_srcs, self.num_feature_levels):
+ if l == _len_srcs:
+ src = self.input_proj[l](self.features[-1].tensors)
+ else:
+ src = self.input_proj[l](srcs[-1])
+ m = samples.mask
+ mask = F.interpolate(m[None].float(), size=src.shape[-2:]).to(torch.bool)[0]
+ pos_l = self.backbone[1](NestedTensor(src, mask)).to(src.dtype)
+ srcs.append(src)
+ masks.append(mask)
+ self.poss.append(pos_l)
+
+ input_query_bbox = input_query_label = attn_mask = dn_meta = None
+ hs, reference, hs_enc, ref_enc, init_box_proposal = self.transformer(
+ srcs, masks, input_query_bbox, self.poss, input_query_label, attn_mask, text_dict
+ )
+
+ # deformable-detr-like anchor update
+ outputs_coord_list = []
+ for dec_lid, (layer_ref_sig, layer_bbox_embed, layer_hs) in enumerate(
+ zip(reference[:-1], self.bbox_embed, hs)
+ ):
+ layer_delta_unsig = layer_bbox_embed(layer_hs)
+ layer_outputs_unsig = layer_delta_unsig + inverse_sigmoid(layer_ref_sig)
+ layer_outputs_unsig = layer_outputs_unsig.sigmoid()
+ outputs_coord_list.append(layer_outputs_unsig)
+ outputs_coord_list = torch.stack(outputs_coord_list)
+
+ # output
+ outputs_class = torch.stack(
+ [layer_cls_embed(layer_hs, text_dict) for layer_cls_embed, layer_hs in zip(self.class_embed, hs)]
+ )
+ out = {"pred_logits": outputs_class[-1], "pred_boxes": outputs_coord_list[-1]}
+
+ # # for intermediate outputs
+ # if self.aux_loss:
+ # out['aux_outputs'] = self._set_aux_loss(outputs_class, outputs_coord_list)
+
+ # # for encoder output
+ # if hs_enc is not None:
+ # # prepare intermediate outputs
+ # interm_coord = ref_enc[-1]
+ # interm_class = self.transformer.enc_out_class_embed(hs_enc[-1], text_dict)
+ # out['interm_outputs'] = {'pred_logits': interm_class, 'pred_boxes': interm_coord}
+ # out['interm_outputs_for_matching_pre'] = {'pred_logits': interm_class, 'pred_boxes': init_box_proposal}
+ unset_image_tensor = kw.get("unset_image_tensor", True)
+ if unset_image_tensor:
+            self.unset_image_tensor()  # release cached image features
+ return out
+
+ @torch.jit.unused
+ def _set_aux_loss(self, outputs_class, outputs_coord):
+ # this is a workaround to make torchscript happy, as torchscript
+ # doesn't support dictionary with non-homogeneous values, such
+ # as a dict having both a Tensor and a list.
+ return [{"pred_logits": a, "pred_boxes": b} for a, b in zip(outputs_class[:-1], outputs_coord[:-1])]
+
+
+@MODULE_BUILD_FUNCS.registe_with_name(module_name="groundingdino")
+def build_groundingdino(args):
+
+ backbone = build_backbone(args)
+ transformer = build_transformer(args)
+
+ dn_labelbook_size = args.dn_labelbook_size
+ dec_pred_bbox_embed_share = args.dec_pred_bbox_embed_share
+ sub_sentence_present = args.sub_sentence_present
+
+ model = GroundingDINO(
+ backbone,
+ transformer,
+ num_queries=args.num_queries,
+ aux_loss=True,
+ iter_update=True,
+ query_dim=4,
+ num_feature_levels=args.num_feature_levels,
+ nheads=args.nheads,
+ dec_pred_bbox_embed_share=dec_pred_bbox_embed_share,
+ two_stage_type=args.two_stage_type,
+ two_stage_bbox_embed_share=args.two_stage_bbox_embed_share,
+ two_stage_class_embed_share=args.two_stage_class_embed_share,
+ num_patterns=args.num_patterns,
+ dn_number=0,
+ dn_box_noise_scale=args.dn_box_noise_scale,
+ dn_label_noise_ratio=args.dn_label_noise_ratio,
+ dn_labelbook_size=dn_labelbook_size,
+ text_encoder_type=args.text_encoder_type,
+ sub_sentence_present=sub_sentence_present,
+ max_text_len=args.max_text_len,
+ )
+
+ return model
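+
+
+# Illustrative usage sketch (an addition for exposition, not part of the upstream
+# file; checkpoint loading is omitted and the category names are made up). Captions
+# follow the GroundingDINO convention of lowercase phrases separated by " . ":
+#
+#   model = build_groundingdino(args)
+#   caption = "car . pedestrian . traffic light ."
+#   outputs = model(samples, captions=[caption])
+#   logits = outputs["pred_logits"].sigmoid()  # (bs, num_queries, max_text_len)
+#   boxes = outputs["pred_boxes"]              # (bs, num_queries, 4), normalized cxcywh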
diff --git a/projects/PCSegSAM2/grounding_dino/groundingdino/models/GroundingDINO/ms_deform_attn.py b/projects/PCSegSAM2/grounding_dino/groundingdino/models/GroundingDINO/ms_deform_attn.py
new file mode 100644
index 00000000..608d7935
--- /dev/null
+++ b/projects/PCSegSAM2/grounding_dino/groundingdino/models/GroundingDINO/ms_deform_attn.py
@@ -0,0 +1,399 @@
+# ------------------------------------------------------------------------
+# Grounding DINO
+# url: https://github.com/IDEA-Research/GroundingDINO
+# Copyright (c) 2023 IDEA. All Rights Reserved.
+# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
+# ------------------------------------------------------------------------
+# Deformable DETR
+# Copyright (c) 2020 SenseTime. All Rights Reserved.
+# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
+# ------------------------------------------------------------------------------------------------
+# Modified from:
+# https://github.com/fundamentalvision/Deformable-DETR/blob/main/models/ops/functions/ms_deform_attn_func.py
+# https://github.com/fundamentalvision/Deformable-DETR/blob/main/models/ops/modules/ms_deform_attn.py
+# https://github.com/open-mmlab/mmcv/blob/master/mmcv/ops/multi_scale_deform_attn.py
+# ------------------------------------------------------------------------------------------------
+
+import math
+import warnings
+from typing import Optional
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from torch.autograd import Function
+from torch.autograd.function import once_differentiable
+from torch.nn.init import constant_, xavier_uniform_
+
+try:
+ from groundingdino import _C
+except Exception:
+    warnings.warn("Failed to load custom C++ ops; falling back to the pure-PyTorch deformable attention implementation.")
+
+
+# helpers
+def _is_power_of_2(n):
+ if (not isinstance(n, int)) or (n < 0):
+ raise ValueError("invalid input for _is_power_of_2: {} (type: {})".format(n, type(n)))
+ return (n & (n - 1) == 0) and n != 0
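+
+
+# Example (illustrative): _is_power_of_2(64) -> True, _is_power_of_2(12) -> False,
+# _is_power_of_2(0) -> False. n & (n - 1) clears the lowest set bit, so the
+# expression is zero exactly for powers of two.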
+
+
+class MultiScaleDeformableAttnFunction(Function):
+ @staticmethod
+ def forward(
+ ctx,
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ im2col_step,
+ ):
+ ctx.im2col_step = im2col_step
+ output = _C.ms_deform_attn_forward(
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ ctx.im2col_step,
+ )
+ ctx.save_for_backward(
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ )
+ return output
+
+ @staticmethod
+ @once_differentiable
+ def backward(ctx, grad_output):
+ (
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ ) = ctx.saved_tensors
+ grad_value, grad_sampling_loc, grad_attn_weight = _C.ms_deform_attn_backward(
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ grad_output,
+ ctx.im2col_step,
+ )
+
+ return grad_value, None, None, grad_sampling_loc, grad_attn_weight, None
+
+
+def multi_scale_deformable_attn_pytorch(
+ value: torch.Tensor,
+ value_spatial_shapes: torch.Tensor,
+ sampling_locations: torch.Tensor,
+ attention_weights: torch.Tensor,
+) -> torch.Tensor:
+
+ bs, _, num_heads, embed_dims = value.shape
+ _, num_queries, num_heads, num_levels, num_points, _ = sampling_locations.shape
+ value_list = value.split([H_ * W_ for H_, W_ in value_spatial_shapes], dim=1)
+ sampling_grids = 2 * sampling_locations - 1
+ sampling_value_list = []
+ for level, (H_, W_) in enumerate(value_spatial_shapes):
+ # bs, H_*W_, num_heads, embed_dims ->
+ # bs, H_*W_, num_heads*embed_dims ->
+ # bs, num_heads*embed_dims, H_*W_ ->
+ # bs*num_heads, embed_dims, H_, W_
+ value_l_ = value_list[level].flatten(2).transpose(1, 2).reshape(bs * num_heads, embed_dims, H_, W_)
+ # bs, num_queries, num_heads, num_points, 2 ->
+ # bs, num_heads, num_queries, num_points, 2 ->
+ # bs*num_heads, num_queries, num_points, 2
+ sampling_grid_l_ = sampling_grids[:, :, :, level].transpose(1, 2).flatten(0, 1)
+ # bs*num_heads, embed_dims, num_queries, num_points
+ sampling_value_l_ = F.grid_sample(
+ value_l_, sampling_grid_l_, mode="bilinear", padding_mode="zeros", align_corners=False
+ )
+ sampling_value_list.append(sampling_value_l_)
+ # (bs, num_queries, num_heads, num_levels, num_points) ->
+ # (bs, num_heads, num_queries, num_levels, num_points) ->
+ # (bs, num_heads, 1, num_queries, num_levels*num_points)
+ attention_weights = attention_weights.transpose(1, 2).reshape(
+ bs * num_heads, 1, num_queries, num_levels * num_points
+ )
+ output = (
+ (torch.stack(sampling_value_list, dim=-2).flatten(-2) * attention_weights)
+ .sum(-1)
+ .view(bs, num_heads * embed_dims, num_queries)
+ )
+ return output.transpose(1, 2).contiguous()
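+
+
+# Illustrative shape check (an addition for exposition, not part of the upstream
+# file): exercises the pure-PyTorch fallback above with two feature levels. It is
+# defined as a function and never called at import time.
+def _demo_msda_pytorch_shapes():
+    bs, num_heads, embed_dims, num_queries, num_points = 2, 4, 16, 10, 4
+    spatial_shapes = torch.as_tensor([[8, 8], [4, 4]], dtype=torch.long)
+    num_levels = spatial_shapes.shape[0]
+    num_value = int((spatial_shapes[:, 0] * spatial_shapes[:, 1]).sum())  # 64 + 16 = 80
+    value = torch.rand(bs, num_value, num_heads, embed_dims)
+    sampling_locations = torch.rand(bs, num_queries, num_heads, num_levels, num_points, 2)
+    attention_weights = torch.softmax(
+        torch.rand(bs, num_queries, num_heads, num_levels * num_points), -1
+    ).view(bs, num_queries, num_heads, num_levels, num_points)
+    out = multi_scale_deformable_attn_pytorch(value, spatial_shapes, sampling_locations, attention_weights)
+    assert out.shape == (bs, num_queries, num_heads * embed_dims)  # (2, 10, 64)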
+
+
+class MultiScaleDeformableAttention(nn.Module):
+ """Multi-Scale Deformable Attention Module used in Deformable-DETR
+
+ `Deformable DETR: Deformable Transformers for End-to-End Object Detection.
+ `_.
+
+ Args:
+ embed_dim (int): The embedding dimension of Attention. Default: 256.
+ num_heads (int): The number of attention heads. Default: 8.
+ num_levels (int): The number of feature map used in Attention. Default: 4.
+ num_points (int): The number of sampling points for each query
+ in each head. Default: 4.
+ img2col_steps (int): The step used in image_to_column. Defualt: 64.
+ dropout (float): Dropout layer used in output. Default: 0.1.
+ batch_first (bool): if ``True``, then the input and output tensor will be
+ provided as `(bs, n, embed_dim)`. Default: False. `(n, bs, embed_dim)`
+ """
+
+ def __init__(
+ self,
+ embed_dim: int = 256,
+ num_heads: int = 8,
+ num_levels: int = 4,
+ num_points: int = 4,
+ img2col_step: int = 64,
+ batch_first: bool = False,
+ ):
+ super().__init__()
+ if embed_dim % num_heads != 0:
+ raise ValueError(
+ "embed_dim must be divisible by num_heads, but got {} and {}".format(embed_dim, num_heads)
+ )
+ head_dim = embed_dim // num_heads
+
+ self.batch_first = batch_first
+
+        if not _is_power_of_2(head_dim):
+            warnings.warn(
+                "Set embed_dim in MultiScaleDeformableAttention so that the dimension of "
+                "each attention head is a power of 2, which makes the CUDA implementation "
+                "more efficient."
+            )
+
+ self.im2col_step = img2col_step
+ self.embed_dim = embed_dim
+ self.num_heads = num_heads
+ self.num_levels = num_levels
+ self.num_points = num_points
+ self.sampling_offsets = nn.Linear(embed_dim, num_heads * num_levels * num_points * 2)
+ self.attention_weights = nn.Linear(embed_dim, num_heads * num_levels * num_points)
+ self.value_proj = nn.Linear(embed_dim, embed_dim)
+ self.output_proj = nn.Linear(embed_dim, embed_dim)
+
+ self.init_weights()
+
+ def _reset_parameters(self):
+ return self.init_weights()
+
+ def init_weights(self):
+ """
+ Default initialization for Parameters of Module.
+ """
+ constant_(self.sampling_offsets.weight.data, 0.0)
+ thetas = torch.arange(self.num_heads, dtype=torch.float32) * (2.0 * math.pi / self.num_heads)
+ grid_init = torch.stack([thetas.cos(), thetas.sin()], -1)
+ grid_init = (
+ (grid_init / grid_init.abs().max(-1, keepdim=True)[0])
+ .view(self.num_heads, 1, 1, 2)
+ .repeat(1, self.num_levels, self.num_points, 1)
+ )
+ for i in range(self.num_points):
+ grid_init[:, :, i, :] *= i + 1
+ with torch.no_grad():
+ self.sampling_offsets.bias = nn.Parameter(grid_init.view(-1))
+ constant_(self.attention_weights.weight.data, 0.0)
+ constant_(self.attention_weights.bias.data, 0.0)
+ xavier_uniform_(self.value_proj.weight.data)
+ constant_(self.value_proj.bias.data, 0.0)
+ xavier_uniform_(self.output_proj.weight.data)
+ constant_(self.output_proj.bias.data, 0.0)
+
+ def freeze_sampling_offsets(self):
+ print("Freeze sampling offsets")
+ self.sampling_offsets.weight.requires_grad = False
+ self.sampling_offsets.bias.requires_grad = False
+
+ def freeze_attention_weights(self):
+ print("Freeze attention weights")
+ self.attention_weights.weight.requires_grad = False
+ self.attention_weights.bias.requires_grad = False
+
+ def forward(
+ self,
+ query: torch.Tensor,
+ key: Optional[torch.Tensor] = None,
+ value: Optional[torch.Tensor] = None,
+ query_pos: Optional[torch.Tensor] = None,
+ key_padding_mask: Optional[torch.Tensor] = None,
+ reference_points: Optional[torch.Tensor] = None,
+ spatial_shapes: Optional[torch.Tensor] = None,
+ level_start_index: Optional[torch.Tensor] = None,
+ **kwargs
+ ) -> torch.Tensor:
+ """Forward Function of MultiScaleDeformableAttention
+
+ Args:
+ query (torch.Tensor): Query embeddings with shape
+ `(num_query, bs, embed_dim)`
+ key (torch.Tensor): Key embeddings with shape
+ `(num_key, bs, embed_dim)`
+ value (torch.Tensor): Value embeddings with shape
+ `(num_key, bs, embed_dim)`
+ query_pos (torch.Tensor): The position embedding for `query`. Default: None.
+            key_padding_mask (torch.Tensor): ByteTensor for `key`, with shape `(bs, num_key)`,
+                indicating which elements within `key` should be ignored in attention.
+            reference_points (torch.Tensor): The normalized reference points
+                with shape `(bs, num_query, num_levels, 2)`,
+                all elements in the range [0, 1], top-left (0, 0),
+                bottom-right (1, 1), including the padding area;
+                or `(N, Length_{query}, num_levels, 4)`, with two additional
+                dimensions `(w, h)` to form reference boxes.
+ spatial_shapes (torch.Tensor): Spatial shape of features in different levels.
+ With shape `(num_levels, 2)`, last dimension represents `(h, w)`.
+ level_start_index (torch.Tensor): The start index of each level. A tensor with
+ shape `(num_levels, )` which can be represented as
+ `[0, h_0 * w_0, h_0 * w_0 + h_1 * w_1, ...]`.
+
+ Returns:
+ torch.Tensor: forward results with shape `(num_query, bs, embed_dim)`
+ """
+
+ if value is None:
+ value = query
+
+ if query_pos is not None:
+ query = query + query_pos
+
+ if not self.batch_first:
+ # change to (bs, num_query ,embed_dims)
+ query = query.permute(1, 0, 2)
+ value = value.permute(1, 0, 2)
+
+ bs, num_query, _ = query.shape
+ bs, num_value, _ = value.shape
+
+ assert (spatial_shapes[:, 0] * spatial_shapes[:, 1]).sum() == num_value
+
+ value = self.value_proj(value)
+ if key_padding_mask is not None:
+ value = value.masked_fill(key_padding_mask[..., None], float(0))
+ value = value.view(bs, num_value, self.num_heads, -1)
+ sampling_offsets = self.sampling_offsets(query).view(
+ bs, num_query, self.num_heads, self.num_levels, self.num_points, 2
+ )
+ attention_weights = self.attention_weights(query).view(
+ bs, num_query, self.num_heads, self.num_levels * self.num_points
+ )
+ attention_weights = attention_weights.softmax(-1)
+ attention_weights = attention_weights.view(
+ bs,
+ num_query,
+ self.num_heads,
+ self.num_levels,
+ self.num_points,
+ )
+
+ # bs, num_query, num_heads, num_levels, num_points, 2
+ if reference_points.shape[-1] == 2:
+ offset_normalizer = torch.stack([spatial_shapes[..., 1], spatial_shapes[..., 0]], -1)
+ sampling_locations = (
+ reference_points[:, :, None, :, None, :]
+ + sampling_offsets / offset_normalizer[None, None, None, :, None, :]
+ )
+ elif reference_points.shape[-1] == 4:
+ sampling_locations = (
+ reference_points[:, :, None, :, None, :2]
+ + sampling_offsets / self.num_points * reference_points[:, :, None, :, None, 2:] * 0.5
+ )
+ else:
+ raise ValueError(
+ "Last dim of reference_points must be 2 or 4, but get {} instead.".format(reference_points.shape[-1])
+ )
+
+ if torch.cuda.is_available() and value.is_cuda:
+ halffloat = False
+ if value.dtype == torch.float16:
+ halffloat = True
+ value = value.float()
+ sampling_locations = sampling_locations.float()
+ attention_weights = attention_weights.float()
+
+ output = MultiScaleDeformableAttnFunction.apply(
+ value,
+ spatial_shapes,
+ level_start_index,
+ sampling_locations,
+ attention_weights,
+ self.im2col_step,
+ )
+
+ if halffloat:
+ output = output.half()
+ else:
+ output = multi_scale_deformable_attn_pytorch(value, spatial_shapes, sampling_locations, attention_weights)
+
+ output = self.output_proj(output)
+
+ if not self.batch_first:
+ output = output.permute(1, 0, 2)
+
+ return output
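+
+
+# Illustrative smoke test (an addition for exposition, not part of the upstream
+# file): builds the module with batch_first=True and runs the CPU fallback path;
+# all sizes below are arbitrary. Defined as a function, never called at import time.
+def _demo_ms_deform_attn():
+    attn = MultiScaleDeformableAttention(
+        embed_dim=256, num_heads=8, num_levels=2, num_points=4, batch_first=True
+    )
+    spatial_shapes = torch.as_tensor([[8, 8], [4, 4]], dtype=torch.long)
+    level_start_index = torch.cat(
+        (spatial_shapes.new_zeros((1,)), spatial_shapes.prod(1).cumsum(0)[:-1])
+    )
+    num_value = int((spatial_shapes[:, 0] * spatial_shapes[:, 1]).sum())  # 80
+    query = torch.rand(2, 10, 256)              # bs, num_query, embed_dim
+    value = torch.rand(2, num_value, 256)       # bs, num_value, embed_dim
+    reference_points = torch.rand(2, 10, 2, 2)  # bs, num_query, num_levels, 2
+    out = attn(
+        query=query,
+        value=value,
+        reference_points=reference_points,
+        spatial_shapes=spatial_shapes,
+        level_start_index=level_start_index,
+    )
+    assert out.shape == (2, 10, 256)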
+
+
+def create_dummy_class(klass, dependency, message=""):
+ """
+ When a dependency of a class is not available, create a dummy class which throws ImportError
+ when used.
+
+ Args:
+ klass (str): name of the class.
+ dependency (str): name of the dependency.
+ message: extra message to print
+ Returns:
+ class: a class object
+ """
+ err = "Cannot import '{}', therefore '{}' is not available.".format(dependency, klass)
+ if message:
+ err = err + " " + message
+
+ class _DummyMetaClass(type):
+ # throw error on class attribute access
+ def __getattr__(_, __): # noqa: B902
+ raise ImportError(err)
+
+ class _Dummy(object, metaclass=_DummyMetaClass):
+ # throw error on constructor
+ def __init__(self, *args, **kwargs):
+ raise ImportError(err)
+
+ return _Dummy
+
+
+def create_dummy_func(func, dependency, message=""):
+ """
+ When a dependency of a function is not available, create a dummy function which throws
+ ImportError when used.
+
+ Args:
+ func (str): name of the function.
+ dependency (str or list[str]): name(s) of the dependency.
+ message: extra message to print
+ Returns:
+ function: a function object
+ """
+ err = "Cannot import '{}', therefore '{}' is not available.".format(dependency, func)
+ if message:
+ err = err + " " + message
+
+ if isinstance(dependency, (list, tuple)):
+ dependency = ",".join(dependency)
+
+ def _dummy(*args, **kwargs):
+ raise ImportError(err)
+
+ return _dummy
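+
+
+# Example (illustrative; the names below are hypothetical): how these helpers can
+# degrade gracefully when the compiled extension is missing.
+#
+#   try:
+#       from groundingdino import _C  # noqa: F401
+#   except ImportError:
+#       MSDeformAttnFunction = create_dummy_class("MSDeformAttnFunction", "groundingdino._C")
+#       fused_forward = create_dummy_func("fused_forward", "groundingdino._C")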
diff --git a/projects/PCSegSAM2/grounding_dino/groundingdino/models/GroundingDINO/transformer.py b/projects/PCSegSAM2/grounding_dino/groundingdino/models/GroundingDINO/transformer.py
new file mode 100644
index 00000000..71f05e44
--- /dev/null
+++ b/projects/PCSegSAM2/grounding_dino/groundingdino/models/GroundingDINO/transformer.py
@@ -0,0 +1,925 @@
+# ------------------------------------------------------------------------
+# Grounding DINO
+# url: https://github.com/IDEA-Research/GroundingDINO
+# Copyright (c) 2023 IDEA. All Rights Reserved.
+# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
+# ------------------------------------------------------------------------
+# DINO
+# Copyright (c) 2022 IDEA. All Rights Reserved.
+# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
+# ------------------------------------------------------------------------
+# Conditional DETR Transformer class.
+# Copyright (c) 2021 Microsoft. All Rights Reserved.
+# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
+# ------------------------------------------------------------------------
+# Modified from DETR (https://github.com/facebookresearch/detr)
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
+# ------------------------------------------------------------------------
+
+import warnings
+from typing import Optional
+
+import torch
+import torch.utils.checkpoint as checkpoint
+from groundingdino.util.misc import inverse_sigmoid
+from torch import Tensor, nn
+
+from .fuse_modules import BiAttentionBlock
+from .ms_deform_attn import MultiScaleDeformableAttention as MSDeformAttn
+from .transformer_vanilla import TransformerEncoderLayer
+from .utils import (
+ MLP,
+ _get_activation_fn,
+ _get_clones,
+ gen_encoder_output_proposals,
+ gen_sineembed_for_position,
+ get_sine_pos_embed,
+)
+
+
+class Transformer(nn.Module):
+ def __init__(
+ self,
+ d_model=256,
+ nhead=8,
+ num_queries=300,
+ num_encoder_layers=6,
+ num_unicoder_layers=0,
+ num_decoder_layers=6,
+ dim_feedforward=2048,
+ dropout=0.0,
+ activation="relu",
+ normalize_before=False,
+ return_intermediate_dec=False,
+ query_dim=4,
+ num_patterns=0,
+ # for deformable encoder
+ num_feature_levels=1,
+ enc_n_points=4,
+ dec_n_points=4,
+ # init query
+ learnable_tgt_init=False,
+ # two stage
+ two_stage_type="no", # ['no', 'standard', 'early', 'combine', 'enceachlayer', 'enclayer1']
+ embed_init_tgt=False,
+ # for text
+ use_text_enhancer=False,
+ use_fusion_layer=False,
+ use_checkpoint=False,
+ use_transformer_ckpt=False,
+ use_text_cross_attention=False,
+ text_dropout=0.1,
+ fusion_dropout=0.1,
+ fusion_droppath=0.0,
+ ):
+ super().__init__()
+ self.num_feature_levels = num_feature_levels
+ self.num_encoder_layers = num_encoder_layers
+ self.num_unicoder_layers = num_unicoder_layers
+ self.num_decoder_layers = num_decoder_layers
+ self.num_queries = num_queries
+ assert query_dim == 4
+
+ # choose encoder layer type
+ encoder_layer = DeformableTransformerEncoderLayer(
+ d_model, dim_feedforward, dropout, activation, num_feature_levels, nhead, enc_n_points
+ )
+
+ if use_text_enhancer:
+ text_enhance_layer = TransformerEncoderLayer(
+ d_model=d_model,
+ nhead=nhead // 2,
+ dim_feedforward=dim_feedforward // 2,
+ dropout=text_dropout,
+ )
+ else:
+ text_enhance_layer = None
+
+ if use_fusion_layer:
+ feature_fusion_layer = BiAttentionBlock(
+ v_dim=d_model,
+ l_dim=d_model,
+ embed_dim=dim_feedforward // 2,
+ num_heads=nhead // 2,
+ dropout=fusion_dropout,
+ drop_path=fusion_droppath,
+ )
+ else:
+ feature_fusion_layer = None
+
+ encoder_norm = nn.LayerNorm(d_model) if normalize_before else None
+ assert encoder_norm is None
+ self.encoder = TransformerEncoder(
+ encoder_layer,
+ num_encoder_layers,
+ d_model=d_model,
+ num_queries=num_queries,
+ text_enhance_layer=text_enhance_layer,
+ feature_fusion_layer=feature_fusion_layer,
+ use_checkpoint=use_checkpoint,
+ use_transformer_ckpt=use_transformer_ckpt,
+ )
+
+ # choose decoder layer type
+ decoder_layer = DeformableTransformerDecoderLayer(
+ d_model,
+ dim_feedforward,
+ dropout,
+ activation,
+ num_feature_levels,
+ nhead,
+ dec_n_points,
+ use_text_cross_attention=use_text_cross_attention,
+ )
+
+ decoder_norm = nn.LayerNorm(d_model)
+ self.decoder = TransformerDecoder(
+ decoder_layer,
+ num_decoder_layers,
+ decoder_norm,
+ return_intermediate=return_intermediate_dec,
+ d_model=d_model,
+ query_dim=query_dim,
+ num_feature_levels=num_feature_levels,
+ )
+
+ self.d_model = d_model
+ self.nhead = nhead
+ self.dec_layers = num_decoder_layers
+ self.num_queries = num_queries # useful for single stage model only
+        self.num_patterns = num_patterns
+        if not isinstance(num_patterns, int):
+            warnings.warn("num_patterns should be int but got {}".format(type(num_patterns)))
+            self.num_patterns = 0
+
+ if num_feature_levels > 1:
+ if self.num_encoder_layers > 0:
+ self.level_embed = nn.Parameter(torch.Tensor(num_feature_levels, d_model))
+ else:
+ self.level_embed = None
+
+ self.learnable_tgt_init = learnable_tgt_init
+        assert learnable_tgt_init, "learnable_tgt_init must be True"
+ self.embed_init_tgt = embed_init_tgt
+ if (two_stage_type != "no" and embed_init_tgt) or (two_stage_type == "no"):
+ self.tgt_embed = nn.Embedding(self.num_queries, d_model)
+ nn.init.normal_(self.tgt_embed.weight.data)
+ else:
+ self.tgt_embed = None
+
+ # for two stage
+ self.two_stage_type = two_stage_type
+ assert two_stage_type in ["no", "standard"], "unknown param {} of two_stage_type".format(two_stage_type)
+ if two_stage_type == "standard":
+ # anchor selection at the output of encoder
+ self.enc_output = nn.Linear(d_model, d_model)
+ self.enc_output_norm = nn.LayerNorm(d_model)
+ self.two_stage_wh_embedding = None
+
+ if two_stage_type == "no":
+ self.init_ref_points(num_queries) # init self.refpoint_embed
+
+ self.enc_out_class_embed = None
+ self.enc_out_bbox_embed = None
+
+ self._reset_parameters()
+
+ def _reset_parameters(self):
+ for p in self.parameters():
+ if p.dim() > 1:
+ nn.init.xavier_uniform_(p)
+ for m in self.modules():
+ if isinstance(m, MSDeformAttn):
+ m._reset_parameters()
+ if self.num_feature_levels > 1 and self.level_embed is not None:
+ nn.init.normal_(self.level_embed)
+
+ def get_valid_ratio(self, mask):
+ _, H, W = mask.shape
+ valid_H = torch.sum(~mask[:, :, 0], 1)
+ valid_W = torch.sum(~mask[:, 0, :], 1)
+ valid_ratio_h = valid_H.float() / H
+ valid_ratio_w = valid_W.float() / W
+ valid_ratio = torch.stack([valid_ratio_w, valid_ratio_h], -1)
+ return valid_ratio
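+
+    # Example (illustrative): for a (1, 4, 4) mask whose last column is padding
+    # (True), valid_H = 4 and valid_W = 3, so the returned (w, h) ratios are
+    # (0.75, 1.0).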
+
+ def init_ref_points(self, use_num_queries):
+ self.refpoint_embed = nn.Embedding(use_num_queries, 4)
+
+ def forward(self, srcs, masks, refpoint_embed, pos_embeds, tgt, attn_mask=None, text_dict=None):
+ """
+ Input:
+ - srcs: List of multi features [bs, ci, hi, wi]
+ - masks: List of multi masks [bs, hi, wi]
+ - refpoint_embed: [bs, num_dn, 4]. None in infer
+ - pos_embeds: List of multi pos embeds [bs, ci, hi, wi]
+ - tgt: [bs, num_dn, d_model]. None in infer
+
+ """
+ # prepare input for encoder
+ src_flatten = []
+ mask_flatten = []
+ lvl_pos_embed_flatten = []
+ spatial_shapes = []
+ for lvl, (src, mask, pos_embed) in enumerate(zip(srcs, masks, pos_embeds)):
+ bs, c, h, w = src.shape
+ spatial_shape = (h, w)
+ spatial_shapes.append(spatial_shape)
+
+ src = src.flatten(2).transpose(1, 2) # bs, hw, c
+ mask = mask.flatten(1) # bs, hw
+ pos_embed = pos_embed.flatten(2).transpose(1, 2) # bs, hw, c
+ if self.num_feature_levels > 1 and self.level_embed is not None:
+ lvl_pos_embed = pos_embed + self.level_embed[lvl].view(1, 1, -1)
+ else:
+ lvl_pos_embed = pos_embed
+ lvl_pos_embed_flatten.append(lvl_pos_embed)
+ src_flatten.append(src)
+ mask_flatten.append(mask)
+ src_flatten = torch.cat(src_flatten, 1) # bs, \sum{hxw}, c
+ mask_flatten = torch.cat(mask_flatten, 1) # bs, \sum{hxw}
+ lvl_pos_embed_flatten = torch.cat(lvl_pos_embed_flatten, 1) # bs, \sum{hxw}, c
+ spatial_shapes = torch.as_tensor(spatial_shapes, dtype=torch.long, device=src_flatten.device)
+ level_start_index = torch.cat((spatial_shapes.new_zeros((1,)), spatial_shapes.prod(1).cumsum(0)[:-1]))
+ valid_ratios = torch.stack([self.get_valid_ratio(m) for m in masks], 1)
+
+ # two stage
+ enc_topk_proposals = enc_refpoint_embed = None
+
+ #########################################################
+ # Begin Encoder
+ #########################################################
+ memory, memory_text = self.encoder(
+ src_flatten,
+ pos=lvl_pos_embed_flatten,
+ level_start_index=level_start_index,
+ spatial_shapes=spatial_shapes,
+ valid_ratios=valid_ratios,
+ key_padding_mask=mask_flatten,
+ memory_text=text_dict["encoded_text"],
+ text_attention_mask=~text_dict["text_token_mask"],
+            # the mask is inverted with ~: False means attend to the token; True means the token is padding
+ position_ids=text_dict["position_ids"],
+ text_self_attention_masks=text_dict["text_self_attention_masks"],
+ )
+ #########################################################
+ # End Encoder
+ # - memory: bs, \sum{hw}, c
+ # - mask_flatten: bs, \sum{hw}
+ # - lvl_pos_embed_flatten: bs, \sum{hw}, c
+ # - enc_intermediate_output: None or (nenc+1, bs, nq, c) or (nenc, bs, nq, c)
+ # - enc_intermediate_refpoints: None or (nenc+1, bs, nq, c) or (nenc, bs, nq, c)
+ #########################################################
+ text_dict["encoded_text"] = memory_text
+
+ if self.two_stage_type == "standard":
+ output_memory, output_proposals = gen_encoder_output_proposals(memory, mask_flatten, spatial_shapes)
+ output_memory = self.enc_output_norm(self.enc_output(output_memory))
+
+ if text_dict is not None:
+ enc_outputs_class_unselected = self.enc_out_class_embed(output_memory, text_dict)
+ else:
+ enc_outputs_class_unselected = self.enc_out_class_embed(output_memory)
+
+ topk_logits = enc_outputs_class_unselected.max(-1)[0]
+ enc_outputs_coord_unselected = (
+ self.enc_out_bbox_embed(output_memory) + output_proposals
+ ) # (bs, \sum{hw}, 4) unsigmoid
+ topk = self.num_queries
+
+ topk_proposals = torch.topk(topk_logits, topk, dim=1)[1] # bs, nq
+
+ # gather boxes
+ refpoint_embed_undetach = torch.gather(
+ enc_outputs_coord_unselected, 1, topk_proposals.unsqueeze(-1).repeat(1, 1, 4)
+ ) # unsigmoid
+ refpoint_embed_ = refpoint_embed_undetach.detach()
+ init_box_proposal = torch.gather(
+ output_proposals, 1, topk_proposals.unsqueeze(-1).repeat(1, 1, 4)
+ ).sigmoid() # sigmoid
+
+ # gather tgt
+ tgt_undetach = torch.gather(output_memory, 1, topk_proposals.unsqueeze(-1).repeat(1, 1, self.d_model))
+ if self.embed_init_tgt:
+ tgt_ = self.tgt_embed.weight[:, None, :].repeat(1, bs, 1).transpose(0, 1) # nq, bs, d_model
+ else:
+ tgt_ = tgt_undetach.detach()
+
+ if refpoint_embed is not None:
+ refpoint_embed = torch.cat([refpoint_embed, refpoint_embed_], dim=1)
+ tgt = torch.cat([tgt, tgt_], dim=1)
+ else:
+ refpoint_embed, tgt = refpoint_embed_, tgt_
+
+ elif self.two_stage_type == "no":
+ tgt_ = self.tgt_embed.weight[:, None, :].repeat(1, bs, 1).transpose(0, 1) # nq, bs, d_model
+ refpoint_embed_ = self.refpoint_embed.weight[:, None, :].repeat(1, bs, 1).transpose(0, 1) # nq, bs, 4
+
+ if refpoint_embed is not None:
+ refpoint_embed = torch.cat([refpoint_embed, refpoint_embed_], dim=1)
+ tgt = torch.cat([tgt, tgt_], dim=1)
+ else:
+ refpoint_embed, tgt = refpoint_embed_, tgt_
+
+ if self.num_patterns > 0:
+ tgt_embed = tgt.repeat(1, self.num_patterns, 1)
+ refpoint_embed = refpoint_embed.repeat(1, self.num_patterns, 1)
+ tgt_pat = self.patterns.weight[None, :, :].repeat_interleave(
+ self.num_queries, 1
+ ) # 1, n_q*n_pat, d_model
+ tgt = tgt_embed + tgt_pat
+
+ init_box_proposal = refpoint_embed_.sigmoid()
+
+ else:
+ raise NotImplementedError("unknown two_stage_type {}".format(self.two_stage_type))
+ #########################################################
+ # End preparing tgt
+ # - tgt: bs, NQ, d_model
+ # - refpoint_embed(unsigmoid): bs, NQ, d_model
+ #########################################################
+
+ #########################################################
+ # Begin Decoder
+ #########################################################
+ hs, references = self.decoder(
+ tgt=tgt.transpose(0, 1),
+ memory=memory.transpose(0, 1),
+ memory_key_padding_mask=mask_flatten,
+ pos=lvl_pos_embed_flatten.transpose(0, 1),
+ refpoints_unsigmoid=refpoint_embed.transpose(0, 1),
+ level_start_index=level_start_index,
+ spatial_shapes=spatial_shapes,
+ valid_ratios=valid_ratios,
+ tgt_mask=attn_mask,
+ memory_text=text_dict["encoded_text"],
+ text_attention_mask=~text_dict["text_token_mask"],
+            # the mask is inverted with ~: False means attend to the token; True means the token is padding
+ )
+ #########################################################
+ # End Decoder
+ # hs: n_dec, bs, nq, d_model
+ # references: n_dec+1, bs, nq, query_dim
+ #########################################################
+
+ #########################################################
+ # Begin postprocess
+ #########################################################
+ if self.two_stage_type == "standard":
+ hs_enc = tgt_undetach.unsqueeze(0)
+ ref_enc = refpoint_embed_undetach.sigmoid().unsqueeze(0)
+ else:
+ hs_enc = ref_enc = None
+ #########################################################
+ # End postprocess
+ # hs_enc: (n_enc+1, bs, nq, d_model) or (1, bs, nq, d_model) or (n_enc, bs, nq, d_model) or None
+ # ref_enc: (n_enc+1, bs, nq, query_dim) or (1, bs, nq, query_dim) or (n_enc, bs, nq, d_model) or None
+ #########################################################
+
+ return hs, references, hs_enc, ref_enc, init_box_proposal
+ # hs: (n_dec, bs, nq, d_model)
+ # references: sigmoid coordinates. (n_dec+1, bs, bq, 4)
+ # hs_enc: (n_enc+1, bs, nq, d_model) or (1, bs, nq, d_model) or None
+ # ref_enc: sigmoid coordinates. \
+ # (n_enc+1, bs, nq, query_dim) or (1, bs, nq, query_dim) or None
+
+
+class TransformerEncoder(nn.Module):
+ def __init__(
+ self,
+ encoder_layer,
+ num_layers,
+ d_model=256,
+ num_queries=300,
+ enc_layer_share=False,
+ text_enhance_layer=None,
+ feature_fusion_layer=None,
+ use_checkpoint=False,
+ use_transformer_ckpt=False,
+ ):
+ """_summary_
+
+ Args:
+ encoder_layer (_type_): _description_
+ num_layers (_type_): _description_
+ norm (_type_, optional): _description_. Defaults to None.
+ d_model (int, optional): _description_. Defaults to 256.
+ num_queries (int, optional): _description_. Defaults to 300.
+ enc_layer_share (bool, optional): _description_. Defaults to False.
+
+ """
+ super().__init__()
+ # prepare layers
+ self.layers = []
+ self.text_layers = []
+ self.fusion_layers = []
+ if num_layers > 0:
+ self.layers = _get_clones(encoder_layer, num_layers, layer_share=enc_layer_share)
+
+ if text_enhance_layer is not None:
+ self.text_layers = _get_clones(text_enhance_layer, num_layers, layer_share=enc_layer_share)
+ if feature_fusion_layer is not None:
+ self.fusion_layers = _get_clones(feature_fusion_layer, num_layers, layer_share=enc_layer_share)
+ else:
+ self.layers = []
+ del encoder_layer
+
+ if text_enhance_layer is not None:
+ self.text_layers = []
+ del text_enhance_layer
+ if feature_fusion_layer is not None:
+ self.fusion_layers = []
+ del feature_fusion_layer
+
+ self.query_scale = None
+ self.num_queries = num_queries
+ self.num_layers = num_layers
+ self.d_model = d_model
+
+ self.use_checkpoint = use_checkpoint
+ self.use_transformer_ckpt = use_transformer_ckpt
+
+ @staticmethod
+ def get_reference_points(spatial_shapes, valid_ratios, device):
+ reference_points_list = []
+ for lvl, (H_, W_) in enumerate(spatial_shapes):
+
+            ref_y, ref_x = torch.meshgrid(
+                torch.linspace(0.5, H_ - 0.5, H_, dtype=torch.float32, device=device),
+                torch.linspace(0.5, W_ - 0.5, W_, dtype=torch.float32, device=device),
+                indexing="ij",
+            )
+ ref_y = ref_y.reshape(-1)[None] / (valid_ratios[:, None, lvl, 1] * H_)
+ ref_x = ref_x.reshape(-1)[None] / (valid_ratios[:, None, lvl, 0] * W_)
+ ref = torch.stack((ref_x, ref_y), -1)
+ reference_points_list.append(ref)
+ reference_points = torch.cat(reference_points_list, 1)
+ reference_points = reference_points[:, :, None] * valid_ratios[:, None]
+ return reference_points
+
+ def forward(
+ self,
+ # for images
+ src: Tensor,
+ pos: Tensor,
+ spatial_shapes: Tensor,
+ level_start_index: Tensor,
+ valid_ratios: Tensor,
+ key_padding_mask: Tensor,
+ # for texts
+ memory_text: Tensor = None,
+ text_attention_mask: Tensor = None,
+ pos_text: Tensor = None,
+ text_self_attention_masks: Tensor = None,
+ position_ids: Tensor = None,
+ ):
+ """
+ Input:
+ - src: [bs, sum(hi*wi), 256]
+ - pos: pos embed for src. [bs, sum(hi*wi), 256]
+ - spatial_shapes: h,w of each level [num_level, 2]
+ - level_start_index: [num_level] start point of level in sum(hi*wi).
+ - valid_ratios: [bs, num_level, 2]
+ - key_padding_mask: [bs, sum(hi*wi)]
+
+ - memory_text: bs, n_text, 256
+ - text_attention_mask: bs, n_text
+ False for no padding; True for padding
+ - pos_text: bs, n_text, 256
+
+ - position_ids: bs, n_text
+        Intermediate:
+            - reference_points: [bs, sum(hi*wi), num_level, 2]
+        Outputs:
+            - output: [bs, sum(hi*wi), 256]
+ """
+
+ output = src
+
+ # preparation and reshape
+ if self.num_layers > 0:
+ reference_points = self.get_reference_points(spatial_shapes, valid_ratios, device=src.device)
+
+ if self.text_layers:
+ # generate pos_text
+ bs, n_text, text_dim = memory_text.shape
+ if pos_text is None and position_ids is None:
+ pos_text = (
+ torch.arange(n_text, device=memory_text.device).float().unsqueeze(0).unsqueeze(-1).repeat(bs, 1, 1)
+ )
+ pos_text = get_sine_pos_embed(pos_text, num_pos_feats=256, exchange_xy=False)
+ if position_ids is not None:
+ pos_text = get_sine_pos_embed(position_ids[..., None], num_pos_feats=256, exchange_xy=False)
+
+ # main process
+ for layer_id, layer in enumerate(self.layers):
+ if self.fusion_layers:
+ if self.use_checkpoint:
+ output, memory_text = checkpoint.checkpoint(
+ self.fusion_layers[layer_id],
+ output,
+ memory_text,
+ key_padding_mask,
+ text_attention_mask,
+ )
+ else:
+ output, memory_text = self.fusion_layers[layer_id](
+ v=output,
+ l=memory_text,
+ attention_mask_v=key_padding_mask,
+ attention_mask_l=text_attention_mask,
+ )
+
+ if self.text_layers:
+ memory_text = self.text_layers[layer_id](
+ src=memory_text.transpose(0, 1),
+ src_mask=~text_self_attention_masks, # note we use ~ for mask here
+ src_key_padding_mask=text_attention_mask,
+ pos=(pos_text.transpose(0, 1) if pos_text is not None else None),
+ ).transpose(0, 1)
+
+            # deformable image self-attention
+ if self.use_transformer_ckpt:
+ output = checkpoint.checkpoint(
+ layer,
+ output,
+ pos,
+ reference_points,
+ spatial_shapes,
+ level_start_index,
+ key_padding_mask,
+ )
+ else:
+ output = layer(
+ src=output,
+ pos=pos,
+ reference_points=reference_points,
+ spatial_shapes=spatial_shapes,
+ level_start_index=level_start_index,
+ key_padding_mask=key_padding_mask,
+ )
+
+ return output, memory_text
+
+
+class TransformerDecoder(nn.Module):
+ def __init__(
+ self,
+ decoder_layer,
+ num_layers,
+ norm=None,
+ return_intermediate=False,
+ d_model=256,
+ query_dim=4,
+ num_feature_levels=1,
+ ):
+ super().__init__()
+ if num_layers > 0:
+ self.layers = _get_clones(decoder_layer, num_layers)
+ else:
+ self.layers = []
+ self.num_layers = num_layers
+ self.norm = norm
+ self.return_intermediate = return_intermediate
+        assert return_intermediate, "only return_intermediate=True is supported"
+ self.query_dim = query_dim
+ assert query_dim in [2, 4], "query_dim should be 2/4 but {}".format(query_dim)
+ self.num_feature_levels = num_feature_levels
+
+ self.ref_point_head = MLP(query_dim // 2 * d_model, d_model, d_model, 2)
+ self.query_pos_sine_scale = None
+
+ self.query_scale = None
+ self.bbox_embed = None
+ self.class_embed = None
+
+ self.d_model = d_model
+
+ self.ref_anchor_head = None
+
+ def forward(
+ self,
+ tgt,
+ memory,
+ tgt_mask: Optional[Tensor] = None,
+ memory_mask: Optional[Tensor] = None,
+ tgt_key_padding_mask: Optional[Tensor] = None,
+ memory_key_padding_mask: Optional[Tensor] = None,
+ pos: Optional[Tensor] = None,
+ refpoints_unsigmoid: Optional[Tensor] = None, # num_queries, bs, 2
+ # for memory
+ level_start_index: Optional[Tensor] = None, # num_levels
+ spatial_shapes: Optional[Tensor] = None, # bs, num_levels, 2
+ valid_ratios: Optional[Tensor] = None,
+ # for text
+ memory_text: Optional[Tensor] = None,
+ text_attention_mask: Optional[Tensor] = None,
+ ):
+ """
+ Input:
+ - tgt: nq, bs, d_model
+ - memory: hw, bs, d_model
+ - pos: hw, bs, d_model
+ - refpoints_unsigmoid: nq, bs, 2/4
+ - valid_ratios/spatial_shapes: bs, nlevel, 2
+ """
+ output = tgt
+
+ intermediate = []
+ reference_points = refpoints_unsigmoid.sigmoid()
+ ref_points = [reference_points]
+
+ for layer_id, layer in enumerate(self.layers):
+
+ if reference_points.shape[-1] == 4:
+ reference_points_input = (
+ reference_points[:, :, None] * torch.cat([valid_ratios, valid_ratios], -1)[None, :]
+ ) # nq, bs, nlevel, 4
+ else:
+ assert reference_points.shape[-1] == 2
+ reference_points_input = reference_points[:, :, None] * valid_ratios[None, :]
+ query_sine_embed = gen_sineembed_for_position(reference_points_input[:, :, 0, :]) # nq, bs, 256*2
+
+ # conditional query
+ raw_query_pos = self.ref_point_head(query_sine_embed) # nq, bs, 256
+ pos_scale = self.query_scale(output) if self.query_scale is not None else 1
+ query_pos = pos_scale * raw_query_pos
+ # if os.environ.get("SHILONG_AMP_INFNAN_DEBUG") == '1':
+ # if query_pos.isnan().any() | query_pos.isinf().any():
+ # import ipdb; ipdb.set_trace()
+
+ # main process
+ output = layer(
+ tgt=output,
+ tgt_query_pos=query_pos,
+ tgt_query_sine_embed=query_sine_embed,
+ tgt_key_padding_mask=tgt_key_padding_mask,
+ tgt_reference_points=reference_points_input,
+ memory_text=memory_text,
+ text_attention_mask=text_attention_mask,
+ memory=memory,
+ memory_key_padding_mask=memory_key_padding_mask,
+ memory_level_start_index=level_start_index,
+ memory_spatial_shapes=spatial_shapes,
+ memory_pos=pos,
+ self_attn_mask=tgt_mask,
+ cross_attn_mask=memory_mask,
+ )
+            if output.isnan().any() | output.isinf().any():
+                print(f"output of decoder layer {layer_id} contains nan/inf")
+                try:
+                    num_nan = output.isnan().sum().item()
+                    num_inf = output.isinf().sum().item()
+                    print(f"num_nan {num_nan}, num_inf {num_inf}")
+                except Exception as e:
+                    print(e)
+
+ # iter update
+ if self.bbox_embed is not None:
+ reference_before_sigmoid = inverse_sigmoid(reference_points)
+ delta_unsig = self.bbox_embed[layer_id](output)
+ outputs_unsig = delta_unsig + reference_before_sigmoid
+ new_reference_points = outputs_unsig.sigmoid()
+
+ reference_points = new_reference_points.detach()
+ # if layer_id != self.num_layers - 1:
+ ref_points.append(new_reference_points)
+
+ intermediate.append(self.norm(output))
+
+ return [
+ [itm_out.transpose(0, 1) for itm_out in intermediate],
+ [itm_refpoint.transpose(0, 1) for itm_refpoint in ref_points],
+ ]
+
+
+class DeformableTransformerEncoderLayer(nn.Module):
+ def __init__(
+ self,
+ d_model=256,
+ d_ffn=1024,
+ dropout=0.1,
+ activation="relu",
+ n_levels=4,
+ n_heads=8,
+ n_points=4,
+ ):
+ super().__init__()
+
+ # self attention
+ self.self_attn = MSDeformAttn(
+ embed_dim=d_model,
+ num_levels=n_levels,
+ num_heads=n_heads,
+ num_points=n_points,
+ batch_first=True,
+ )
+ self.dropout1 = nn.Dropout(dropout)
+ self.norm1 = nn.LayerNorm(d_model)
+
+ # ffn
+ self.linear1 = nn.Linear(d_model, d_ffn)
+ self.activation = _get_activation_fn(activation, d_model=d_ffn)
+ self.dropout2 = nn.Dropout(dropout)
+ self.linear2 = nn.Linear(d_ffn, d_model)
+ self.dropout3 = nn.Dropout(dropout)
+ self.norm2 = nn.LayerNorm(d_model)
+
+ @staticmethod
+ def with_pos_embed(tensor, pos):
+ return tensor if pos is None else tensor + pos
+
+ def forward_ffn(self, src):
+ src2 = self.linear2(self.dropout2(self.activation(self.linear1(src))))
+ src = src + self.dropout3(src2)
+ src = self.norm2(src)
+ return src
+
+ def forward(self, src, pos, reference_points, spatial_shapes, level_start_index, key_padding_mask=None):
+ # self attention
+ src2 = self.self_attn(
+ query=self.with_pos_embed(src, pos),
+ reference_points=reference_points,
+ value=src,
+ spatial_shapes=spatial_shapes,
+ level_start_index=level_start_index,
+ key_padding_mask=key_padding_mask,
+ )
+ src = src + self.dropout1(src2)
+ src = self.norm1(src)
+
+ # ffn
+ src = self.forward_ffn(src)
+
+ return src
+
+
+class DeformableTransformerDecoderLayer(nn.Module):
+ def __init__(
+ self,
+ d_model=256,
+ d_ffn=1024,
+ dropout=0.1,
+ activation="relu",
+ n_levels=4,
+ n_heads=8,
+ n_points=4,
+ use_text_feat_guide=False,
+ use_text_cross_attention=False,
+ ):
+ super().__init__()
+
+ # cross attention
+ self.cross_attn = MSDeformAttn(
+ embed_dim=d_model,
+ num_levels=n_levels,
+ num_heads=n_heads,
+ num_points=n_points,
+ batch_first=True,
+ )
+ self.dropout1 = nn.Dropout(dropout) if dropout > 0 else nn.Identity()
+ self.norm1 = nn.LayerNorm(d_model)
+
+ # cross attention text
+ if use_text_cross_attention:
+ self.ca_text = nn.MultiheadAttention(d_model, n_heads, dropout=dropout)
+ self.catext_dropout = nn.Dropout(dropout) if dropout > 0 else nn.Identity()
+ self.catext_norm = nn.LayerNorm(d_model)
+
+ # self attention
+ self.self_attn = nn.MultiheadAttention(d_model, n_heads, dropout=dropout)
+ self.dropout2 = nn.Dropout(dropout) if dropout > 0 else nn.Identity()
+ self.norm2 = nn.LayerNorm(d_model)
+
+ # ffn
+ self.linear1 = nn.Linear(d_model, d_ffn)
+ self.activation = _get_activation_fn(activation, d_model=d_ffn, batch_dim=1)
+ self.dropout3 = nn.Dropout(dropout) if dropout > 0 else nn.Identity()
+ self.linear2 = nn.Linear(d_ffn, d_model)
+ self.dropout4 = nn.Dropout(dropout) if dropout > 0 else nn.Identity()
+ self.norm3 = nn.LayerNorm(d_model)
+
+ self.key_aware_proj = None
+ self.use_text_feat_guide = use_text_feat_guide
+ assert not use_text_feat_guide
+ self.use_text_cross_attention = use_text_cross_attention
+
+ def rm_self_attn_modules(self):
+ self.self_attn = None
+ self.dropout2 = None
+ self.norm2 = None
+
+ @staticmethod
+ def with_pos_embed(tensor, pos):
+ return tensor if pos is None else tensor + pos
+
+ def forward_ffn(self, tgt):
+ with torch.cuda.amp.autocast(enabled=False):
+ tgt2 = self.linear2(self.dropout3(self.activation(self.linear1(tgt))))
+ tgt = tgt + self.dropout4(tgt2)
+ tgt = self.norm3(tgt)
+ return tgt
+
+ def forward(
+ self,
+ # for tgt
+ tgt: Optional[Tensor], # nq, bs, d_model
+ tgt_query_pos: Optional[Tensor] = None, # pos for query. MLP(Sine(pos))
+ tgt_query_sine_embed: Optional[Tensor] = None, # pos for query. Sine(pos)
+ tgt_key_padding_mask: Optional[Tensor] = None,
+ tgt_reference_points: Optional[Tensor] = None, # nq, bs, 4
+ memory_text: Optional[Tensor] = None, # bs, num_token, d_model
+ text_attention_mask: Optional[Tensor] = None, # bs, num_token
+ # for memory
+ memory: Optional[Tensor] = None, # hw, bs, d_model
+ memory_key_padding_mask: Optional[Tensor] = None,
+ memory_level_start_index: Optional[Tensor] = None, # num_levels
+ memory_spatial_shapes: Optional[Tensor] = None, # bs, num_levels, 2
+ memory_pos: Optional[Tensor] = None, # pos for memory
+ # sa
+ self_attn_mask: Optional[Tensor] = None, # mask used for self-attention
+ cross_attn_mask: Optional[Tensor] = None, # mask used for cross-attention
+ ):
+ """
+ Input:
+ - tgt/tgt_query_pos: nq, bs, d_model
+ -
+ """
+ assert cross_attn_mask is None
+
+ # self attention
+ if self.self_attn is not None:
+ q = k = self.with_pos_embed(tgt, tgt_query_pos)
+ tgt2 = self.self_attn(q, k, tgt, attn_mask=self_attn_mask)[0]
+ tgt = tgt + self.dropout2(tgt2)
+ tgt = self.norm2(tgt)
+
+ if self.use_text_cross_attention:
+ tgt2 = self.ca_text(
+ self.with_pos_embed(tgt, tgt_query_pos),
+ memory_text.transpose(0, 1),
+ memory_text.transpose(0, 1),
+ key_padding_mask=text_attention_mask,
+ )[0]
+ tgt = tgt + self.catext_dropout(tgt2)
+ tgt = self.catext_norm(tgt)
+
+ tgt2 = self.cross_attn(
+ query=self.with_pos_embed(tgt, tgt_query_pos).transpose(0, 1),
+ reference_points=tgt_reference_points.transpose(0, 1).contiguous(),
+ value=memory.transpose(0, 1),
+ spatial_shapes=memory_spatial_shapes,
+ level_start_index=memory_level_start_index,
+ key_padding_mask=memory_key_padding_mask,
+ ).transpose(0, 1)
+ tgt = tgt + self.dropout1(tgt2)
+ tgt = self.norm1(tgt)
+
+ # ffn
+ tgt = self.forward_ffn(tgt)
+
+ return tgt
+
+
+def build_transformer(args):
+ return Transformer(
+ d_model=args.hidden_dim,
+ dropout=args.dropout,
+ nhead=args.nheads,
+ num_queries=args.num_queries,
+ dim_feedforward=args.dim_feedforward,
+ num_encoder_layers=args.enc_layers,
+ num_decoder_layers=args.dec_layers,
+ normalize_before=args.pre_norm,
+ return_intermediate_dec=True,
+ query_dim=args.query_dim,
+ activation=args.transformer_activation,
+ num_patterns=args.num_patterns,
+ num_feature_levels=args.num_feature_levels,
+ enc_n_points=args.enc_n_points,
+ dec_n_points=args.dec_n_points,
+ learnable_tgt_init=True,
+ # two stage
+ two_stage_type=args.two_stage_type, # ['no', 'standard', 'early']
+ embed_init_tgt=args.embed_init_tgt,
+ use_text_enhancer=args.use_text_enhancer,
+ use_fusion_layer=args.use_fusion_layer,
+ use_checkpoint=args.use_checkpoint,
+ use_transformer_ckpt=args.use_transformer_ckpt,
+ use_text_cross_attention=args.use_text_cross_attention,
+ text_dropout=args.text_dropout,
+ fusion_dropout=args.fusion_dropout,
+ fusion_droppath=args.fusion_droppath,
+ )
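+
+
+if __name__ == "__main__":
+    # Illustrative smoke test (an addition for exposition, not part of the upstream
+    # file): the args fields below are assumptions chosen to satisfy the constructor
+    # asserts, not the project's training configuration.
+    from types import SimpleNamespace
+
+    args = SimpleNamespace(
+        hidden_dim=256, dropout=0.0, nheads=8, num_queries=300, dim_feedforward=2048,
+        enc_layers=6, dec_layers=6, pre_norm=False, query_dim=4,
+        transformer_activation="relu", num_patterns=0, num_feature_levels=4,
+        enc_n_points=4, dec_n_points=4, two_stage_type="standard", embed_init_tgt=True,
+        use_text_enhancer=True, use_fusion_layer=True, use_checkpoint=False,
+        use_transformer_ckpt=False, use_text_cross_attention=True,
+        text_dropout=0.0, fusion_dropout=0.0, fusion_droppath=0.1,
+    )
+    model = build_transformer(args)
+    print(sum(p.numel() for p in model.parameters()), "parameters")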
diff --git a/projects/PCSegSAM2/grounding_dino/groundingdino/models/GroundingDINO/transformer_vanilla.py b/projects/PCSegSAM2/grounding_dino/groundingdino/models/GroundingDINO/transformer_vanilla.py
new file mode 100644
index 00000000..10c0920c
--- /dev/null
+++ b/projects/PCSegSAM2/grounding_dino/groundingdino/models/GroundingDINO/transformer_vanilla.py
@@ -0,0 +1,123 @@
+# ------------------------------------------------------------------------
+# Grounding DINO
+# url: https://github.com/IDEA-Research/GroundingDINO
+# Copyright (c) 2023 IDEA. All Rights Reserved.
+# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
+# ------------------------------------------------------------------------
+# Copyright (c) Aishwarya Kamath & Nicolas Carion. Licensed under the Apache License 2.0. All Rights Reserved
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+"""
+DETR Transformer class.
+
+Copy-paste from torch.nn.Transformer with modifications:
+ * positional encodings are passed in MHattention
+ * extra LN at the end of encoder is removed
+ * decoder returns a stack of activations from all decoding layers
+"""
+from typing import Optional
+
+import torch
+import torch.nn.functional as F
+from torch import Tensor, nn
+
+from .utils import (
+ MLP,
+ _get_activation_fn,
+ _get_clones,
+ gen_encoder_output_proposals,
+ gen_sineembed_for_position,
+ sigmoid_focal_loss,
+)
+
+
+class TextTransformer(nn.Module):
+ def __init__(self, num_layers, d_model=256, nheads=8, dim_feedforward=2048, dropout=0.1):
+ super().__init__()
+ self.num_layers = num_layers
+ self.d_model = d_model
+ self.nheads = nheads
+ self.dim_feedforward = dim_feedforward
+ self.norm = None
+
+ single_encoder_layer = TransformerEncoderLayer(
+ d_model=d_model, nhead=nheads, dim_feedforward=dim_feedforward, dropout=dropout
+ )
+ self.layers = _get_clones(single_encoder_layer, num_layers)
+
+ def forward(self, memory_text: torch.Tensor, text_attention_mask: torch.Tensor):
+ """
+
+        Args:
+            memory_text: bs, num_token, d_model
+            text_attention_mask: bs, num_token (True for padding tokens)
+
+ Returns:
+ output: bs, num_token, d_model
+ """
+
+ output = memory_text.transpose(0, 1)
+
+ for layer in self.layers:
+ output = layer(output, src_key_padding_mask=text_attention_mask)
+
+ if self.norm is not None:
+ output = self.norm(output)
+
+ return output.transpose(0, 1)
+
+
+class TransformerEncoderLayer(nn.Module):
+ def __init__(
+ self,
+ d_model,
+ nhead,
+ dim_feedforward=2048,
+ dropout=0.1,
+ activation="relu",
+ normalize_before=False,
+ ):
+ super().__init__()
+ self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
+ # Implementation of Feedforward model
+ self.linear1 = nn.Linear(d_model, dim_feedforward)
+ self.dropout = nn.Dropout(dropout)
+ self.linear2 = nn.Linear(dim_feedforward, d_model)
+
+ self.norm1 = nn.LayerNorm(d_model)
+ self.norm2 = nn.LayerNorm(d_model)
+ self.dropout1 = nn.Dropout(dropout)
+ self.dropout2 = nn.Dropout(dropout)
+
+ self.activation = _get_activation_fn(activation)
+ self.normalize_before = normalize_before
+ self.nhead = nhead
+
+ def with_pos_embed(self, tensor, pos: Optional[Tensor]):
+ return tensor if pos is None else tensor + pos
+
+ def forward(
+ self,
+ src,
+ src_mask: Optional[Tensor] = None,
+ src_key_padding_mask: Optional[Tensor] = None,
+ pos: Optional[Tensor] = None,
+ ):
+        # repeat attn mask
+        if src_mask is not None and src_mask.dim() == 3 and src_mask.shape[0] == src.shape[1]:
+            # bs, num_q, num_k
+            src_mask = src_mask.repeat(self.nhead, 1, 1)
+
+ q = k = self.with_pos_embed(src, pos)
+
+ src2 = self.self_attn(q, k, value=src, attn_mask=src_mask)[0]
+
+ # src2 = self.self_attn(q, k, value=src, attn_mask=src_mask, key_padding_mask=src_key_padding_mask)[0]
+ src = src + self.dropout1(src2)
+ src = self.norm1(src)
+ src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
+ src = src + self.dropout2(src2)
+ src = self.norm2(src)
+ return src
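+
+
+if __name__ == "__main__":
+    # Illustrative smoke test (an addition for exposition, not part of the upstream
+    # file): runs one text encoder layer over 7 tokens with an all-False (attend
+    # everywhere) 3D attention mask; all sizes are arbitrary.
+    layer = TransformerEncoderLayer(d_model=256, nhead=8, dim_feedforward=1024, dropout=0.0)
+    src = torch.rand(7, 2, 256)                        # num_token, bs, d_model
+    src_mask = torch.zeros(2, 7, 7, dtype=torch.bool)  # bs, num_q, num_k; False = attend
+    out = layer(src, src_mask=src_mask)
+    assert out.shape == (7, 2, 256)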
diff --git a/projects/PCSegSAM2/grounding_dino/groundingdino/models/GroundingDINO/utils.py b/projects/PCSegSAM2/grounding_dino/groundingdino/models/GroundingDINO/utils.py
new file mode 100644
index 00000000..03b77d99
--- /dev/null
+++ b/projects/PCSegSAM2/grounding_dino/groundingdino/models/GroundingDINO/utils.py
@@ -0,0 +1,256 @@
+# ------------------------------------------------------------------------
+# Grounding DINO
+# url: https://github.com/IDEA-Research/GroundingDINO
+# Copyright (c) 2023 IDEA. All Rights Reserved.
+# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
+# ------------------------------------------------------------------------
+
+import copy
+import math
+
+import torch
+import torch.nn.functional as F
+from torch import Tensor, nn
+
+
+def _get_clones(module, N, layer_share=False):
+ if layer_share:
+ return nn.ModuleList([module for i in range(N)])
+ else:
+ return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
+
+
+def get_sine_pos_embed(
+ pos_tensor: torch.Tensor,
+ num_pos_feats: int = 128,
+ temperature: int = 10000,
+ exchange_xy: bool = True,
+):
+ """generate sine position embedding from a position tensor
+ Args:
+ pos_tensor (torch.Tensor): shape: [..., n].
+ num_pos_feats (int): projected shape for each float in the tensor.
+ temperature (int): temperature in the sine/cosine function.
+ exchange_xy (bool, optional): exchange pos x and pos y. \
+ For example, input tensor is [x,y], the results will be [pos(y), pos(x)]. Defaults to True.
+ Returns:
+ pos_embed (torch.Tensor): shape: [..., n*num_pos_feats].
+ """
+ scale = 2 * math.pi
+ dim_t = torch.arange(num_pos_feats, dtype=torch.float32, device=pos_tensor.device)
+ dim_t = temperature ** (2 * torch.div(dim_t, 2, rounding_mode="floor") / num_pos_feats)
+
+ def sine_func(x: torch.Tensor):
+ sin_x = x * scale / dim_t
+ sin_x = torch.stack((sin_x[..., 0::2].sin(), sin_x[..., 1::2].cos()), dim=3).flatten(2)
+ return sin_x
+
+ pos_res = [sine_func(x) for x in pos_tensor.split([1] * pos_tensor.shape[-1], dim=-1)]
+ if exchange_xy:
+ pos_res[0], pos_res[1] = pos_res[1], pos_res[0]
+ pos_res = torch.cat(pos_res, dim=-1)
+ return pos_res
+
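+# Shape sketch (illustrative): an n-dim position tensor expands to
+# n * num_pos_feats channels, e.g. for cxcywh boxes:
+#
+#   boxes = torch.rand(2, 100, 4)                # bs, nq, 4 in [0, 1]
+#   emb = get_sine_pos_embed(boxes, num_pos_feats=128)
+#   assert emb.shape == (2, 100, 4 * 128)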
+
+def gen_encoder_output_proposals(memory: Tensor, memory_padding_mask: Tensor, spatial_shapes: Tensor, learnedwh=None):
+ """
+ Input:
+ - memory: bs, \sum{hw}, d_model
+ - memory_padding_mask: bs, \sum{hw}
+ - spatial_shapes: nlevel, 2
+ - learnedwh: 2
+ Output:
+ - output_memory: bs, \sum{hw}, d_model
+ - output_proposals: bs, \sum{hw}, 4
+ """
+ N_, S_, C_ = memory.shape
+ proposals = []
+ _cur = 0
+ for lvl, (H_, W_) in enumerate(spatial_shapes):
+ mask_flatten_ = memory_padding_mask[:, _cur : (_cur + H_ * W_)].view(N_, H_, W_, 1)
+ valid_H = torch.sum(~mask_flatten_[:, :, 0, 0], 1)
+ valid_W = torch.sum(~mask_flatten_[:, 0, :, 0], 1)
+
+ grid_y, grid_x = torch.meshgrid(
+ torch.linspace(0, H_ - 1, H_, dtype=torch.float32, device=memory.device),
+ torch.linspace(0, W_ - 1, W_, dtype=torch.float32, device=memory.device),
+ )
+ grid = torch.cat([grid_x.unsqueeze(-1), grid_y.unsqueeze(-1)], -1) # H_, W_, 2
+
+ scale = torch.cat([valid_W.unsqueeze(-1), valid_H.unsqueeze(-1)], 1).view(N_, 1, 1, 2)
+ grid = (grid.unsqueeze(0).expand(N_, -1, -1, -1) + 0.5) / scale
+
+ if learnedwh is not None:
+ wh = torch.ones_like(grid) * learnedwh.sigmoid() * (2.0**lvl)
+ else:
+ wh = torch.ones_like(grid) * 0.05 * (2.0**lvl)
+
+ # scale = torch.cat([W_[None].unsqueeze(-1), H_[None].unsqueeze(-1)], 1).view(1, 1, 1, 2).repeat(N_, 1, 1, 1)
+ # grid = (grid.unsqueeze(0).expand(N_, -1, -1, -1) + 0.5) / scale
+ # wh = torch.ones_like(grid) / scale
+ proposal = torch.cat((grid, wh), -1).view(N_, -1, 4)
+ proposals.append(proposal)
+ _cur += H_ * W_
+ output_proposals = torch.cat(proposals, 1)
+ output_proposals_valid = ((output_proposals > 0.01) & (output_proposals < 0.99)).all(-1, keepdim=True)
+ output_proposals = torch.log(output_proposals / (1 - output_proposals)) # unsigmoid
+ output_proposals = output_proposals.masked_fill(memory_padding_mask.unsqueeze(-1), float("inf"))
+ output_proposals = output_proposals.masked_fill(~output_proposals_valid, float("inf"))
+
+ output_memory = memory
+ output_memory = output_memory.masked_fill(memory_padding_mask.unsqueeze(-1), float(0))
+ output_memory = output_memory.masked_fill(~output_proposals_valid, float(0))
+
+ # output_memory = output_memory.masked_fill(memory_padding_mask.unsqueeze(-1), float('inf'))
+ # output_memory = output_memory.masked_fill(~output_proposals_valid, float('inf'))
+
+ return output_memory, output_proposals
+
+
+class RandomBoxPerturber:
+ def __init__(self, x_noise_scale=0.2, y_noise_scale=0.2, w_noise_scale=0.2, h_noise_scale=0.2) -> None:
+ self.noise_scale = torch.Tensor([x_noise_scale, y_noise_scale, w_noise_scale, h_noise_scale])
+
+ def __call__(self, refanchors: Tensor) -> Tensor:
+ nq, bs, query_dim = refanchors.shape
+ device = refanchors.device
+
+ noise_raw = torch.rand_like(refanchors)
+ noise_scale = self.noise_scale.to(device)[:query_dim]
+
+ new_refanchors = refanchors * (1 + (noise_raw - 0.5) * noise_scale)
+ return new_refanchors.clamp_(0, 1)
+
+
+def sigmoid_focal_loss(inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2, no_reduction=False):
+ """
+ Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002.
+ Args:
+ inputs: A float tensor of arbitrary shape.
+ The predictions for each example.
+ targets: A float tensor with the same shape as inputs. Stores the binary
+ classification label for each element in inputs
+ (0 for the negative class and 1 for the positive class).
+        alpha: (optional) Weighting factor in range (0,1) to balance
+               positive vs negative examples. Defaults to 0.25; a negative
+               value disables the weighting.
+ gamma: Exponent of the modulating factor (1 - p_t) to
+ balance easy vs hard examples.
+ Returns:
+ Loss tensor
+ """
+ prob = inputs.sigmoid()
+ ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none")
+ p_t = prob * targets + (1 - prob) * (1 - targets)
+ loss = ce_loss * ((1 - p_t) ** gamma)
+
+ if alpha >= 0:
+ alpha_t = alpha * targets + (1 - alpha) * (1 - targets)
+ loss = alpha_t * loss
+
+ if no_reduction:
+ return loss
+
+ return loss.mean(1).sum() / num_boxes
+
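+# Worked sketch (illustrative): for a single confident true positive
+# (p = 0.9, target = 1), the modulating factor (1 - p)^2 = 0.01 scales the
+# plain BCE term down ~100x, e.g.
+#
+#   inputs = torch.tensor([[2.1972]])   # sigmoid(2.1972) ~= 0.9
+#   targets = torch.tensor([[1.0]])
+#   loss = sigmoid_focal_loss(inputs, targets, num_boxes=1)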
+
+class MLP(nn.Module):
+ """Very simple multi-layer perceptron (also called FFN)"""
+
+ def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
+ super().__init__()
+ self.num_layers = num_layers
+ h = [hidden_dim] * (num_layers - 1)
+ self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))
+
+ def forward(self, x):
+ for i, layer in enumerate(self.layers):
+ x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
+ return x
+
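+# Illustrative sketch: DETR-style models use this MLP e.g. as a 3-layer box
+# regressor from d_model features to 4 box coordinates:
+#
+#   bbox_head = MLP(input_dim=256, hidden_dim=256, output_dim=4, num_layers=3)
+#   queries = torch.randn(2, 900, 256)
+#   boxes = bbox_head(queries)  # (2, 900, 4); no activation on the last layer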
+
+def _get_activation_fn(activation, d_model=256, batch_dim=0):
+ """Return an activation function given a string"""
+ if activation == "relu":
+ return F.relu
+ if activation == "gelu":
+ return F.gelu
+ if activation == "glu":
+ return F.glu
+ if activation == "prelu":
+ return nn.PReLU()
+ if activation == "selu":
+ return F.selu
+
+    raise RuntimeError(f"activation should be relu/gelu/glu/prelu/selu, not {activation}.")
+
+
+def gen_sineembed_for_position(pos_tensor):
+ # n_query, bs, _ = pos_tensor.size()
+ # sineembed_tensor = torch.zeros(n_query, bs, 256)
+ scale = 2 * math.pi
+ dim_t = torch.arange(128, dtype=torch.float32, device=pos_tensor.device)
+ dim_t = 10000 ** (2 * (torch.div(dim_t, 2, rounding_mode="floor")) / 128)
+ x_embed = pos_tensor[:, :, 0] * scale
+ y_embed = pos_tensor[:, :, 1] * scale
+ pos_x = x_embed[:, :, None] / dim_t
+ pos_y = y_embed[:, :, None] / dim_t
+ pos_x = torch.stack((pos_x[:, :, 0::2].sin(), pos_x[:, :, 1::2].cos()), dim=3).flatten(2)
+ pos_y = torch.stack((pos_y[:, :, 0::2].sin(), pos_y[:, :, 1::2].cos()), dim=3).flatten(2)
+ if pos_tensor.size(-1) == 2:
+ pos = torch.cat((pos_y, pos_x), dim=2)
+ elif pos_tensor.size(-1) == 4:
+ w_embed = pos_tensor[:, :, 2] * scale
+ pos_w = w_embed[:, :, None] / dim_t
+ pos_w = torch.stack((pos_w[:, :, 0::2].sin(), pos_w[:, :, 1::2].cos()), dim=3).flatten(2)
+
+ h_embed = pos_tensor[:, :, 3] * scale
+ pos_h = h_embed[:, :, None] / dim_t
+ pos_h = torch.stack((pos_h[:, :, 0::2].sin(), pos_h[:, :, 1::2].cos()), dim=3).flatten(2)
+
+ pos = torch.cat((pos_y, pos_x, pos_w, pos_h), dim=2)
+ else:
+ raise ValueError("Unknown pos_tensor shape(-1):{}".format(pos_tensor.size(-1)))
+ return pos
+
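+# Shape sketch (illustrative): unlike get_sine_pos_embed, this helper expects a
+# (n_query, bs, 2 or 4) layout and emits 128 channels per coordinate:
+#
+#   ref_points = torch.rand(900, 2, 4)           # n_query, bs, cxcywh
+#   emb = gen_sineembed_for_position(ref_points)
+#   assert emb.shape == (900, 2, 512)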
+
+class ContrastiveEmbed(nn.Module):
+ def __init__(self, max_text_len=256):
+ """
+ Args:
+ max_text_len: max length of text.
+ """
+ super().__init__()
+ self.max_text_len = max_text_len
+
+ def forward(self, x, text_dict):
+ """_summary_
+
+ Args:
+ x (_type_): _description_
+ text_dict (_type_): _description_
+ {
+ 'encoded_text': encoded_text, # bs, 195, d_model
+ 'text_token_mask': text_token_mask, # bs, 195
+ # True for used tokens. False for padding tokens
+ }
+ Returns:
+ _type_: _description_
+ """
+ assert isinstance(text_dict, dict)
+
+ y = text_dict["encoded_text"]
+ text_token_mask = text_dict["text_token_mask"]
+
+ res = x @ y.transpose(-1, -2)
+ res.masked_fill_(~text_token_mask[:, None, :], float("-inf"))
+
+ # padding to max_text_len
+ new_res = torch.full((*res.shape[:-1], self.max_text_len), float("-inf"), device=res.device)
+ new_res[..., : res.shape[-1]] = res
+
+ return new_res
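+
+# Illustrative sketch (not upstream code): the contrastive head produces, for
+# each visual query, one logit per text token, padded with -inf to max_text_len:
+#
+#   head = ContrastiveEmbed(max_text_len=256)
+#   x = torch.randn(2, 900, 256)                      # bs, num_query, d_model
+#   text_dict = {
+#       "encoded_text": torch.randn(2, 195, 256),     # bs, num_token, d_model
+#       "text_token_mask": torch.ones(2, 195, dtype=torch.bool),
+#   }
+#   logits = head(x, text_dict)                       # (2, 900, 256)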
diff --git a/projects/PCSegSAM2/grounding_dino/groundingdino/models/__init__.py b/projects/PCSegSAM2/grounding_dino/groundingdino/models/__init__.py
new file mode 100644
index 00000000..e3413961
--- /dev/null
+++ b/projects/PCSegSAM2/grounding_dino/groundingdino/models/__init__.py
@@ -0,0 +1,18 @@
+# ------------------------------------------------------------------------
+# Grounding DINO
+# url: https://github.com/IDEA-Research/GroundingDINO
+# Copyright (c) 2023 IDEA. All Rights Reserved.
+# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
+# ------------------------------------------------------------------------
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+from .GroundingDINO import build_groundingdino
+
+
+def build_model(args):
+    # model build functions are maintained through the registry (see registry.py)
+ from .registry import MODULE_BUILD_FUNCS
+
+ assert args.modelname in MODULE_BUILD_FUNCS._module_dict
+ build_func = MODULE_BUILD_FUNCS.get(args.modelname)
+ model = build_func(args)
+ return model
diff --git a/projects/PCSegSAM2/grounding_dino/groundingdino/models/registry.py b/projects/PCSegSAM2/grounding_dino/groundingdino/models/registry.py
new file mode 100644
index 00000000..fb4715fa
--- /dev/null
+++ b/projects/PCSegSAM2/grounding_dino/groundingdino/models/registry.py
@@ -0,0 +1,60 @@
+# ------------------------------------------------------------------------
+# Grounding DINO
+# url: https://github.com/IDEA-Research/GroundingDINO
+# Copyright (c) 2023 IDEA. All Rights Reserved.
+# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
+# ------------------------------------------------------------------------
+# -*- coding: utf-8 -*-
+# @Author: Yihao Chen
+# @Date: 2021-08-16 16:03:17
+# @Last Modified by: Shilong Liu
+# @Last Modified time: 2022-01-23 15:26
+# modified from mmcv
+
+import inspect
+from functools import partial
+
+
+class Registry(object):
+ def __init__(self, name):
+ self._name = name
+ self._module_dict = dict()
+
+ def __repr__(self):
+ format_str = self.__class__.__name__ + "(name={}, items={})".format(self._name, list(self._module_dict.keys()))
+ return format_str
+
+ def __len__(self):
+ return len(self._module_dict)
+
+ @property
+ def name(self):
+ return self._name
+
+ @property
+ def module_dict(self):
+ return self._module_dict
+
+ def get(self, key):
+ return self._module_dict.get(key, None)
+
+    def registe_with_name(self, module_name=None, force=False):
+        # NOTE: the method name keeps the upstream spelling ("registe") because
+        # existing decorator call sites depend on it.
+        return partial(self.register, module_name=module_name, force=force)
+
+ def register(self, module_build_function, module_name=None, force=False):
+ """Register a module build function.
+ Args:
+ module (:obj:`nn.Module`): Module to be registered.
+ """
+ if not inspect.isfunction(module_build_function):
+ raise TypeError("module_build_function must be a function, but got {}".format(type(module_build_function)))
+ if module_name is None:
+ module_name = module_build_function.__name__
+ if not force and module_name in self._module_dict:
+ raise KeyError("{} is already registered in {}".format(module_name, self.name))
+ self._module_dict[module_name] = module_build_function
+
+ return module_build_function
+
+
+MODULE_BUILD_FUNCS = Registry("model build functions")
diff --git a/projects/PCSegSAM2/grounding_dino/groundingdino/util/__init__.py b/projects/PCSegSAM2/grounding_dino/groundingdino/util/__init__.py
new file mode 100644
index 00000000..168f9979
--- /dev/null
+++ b/projects/PCSegSAM2/grounding_dino/groundingdino/util/__init__.py
@@ -0,0 +1 @@
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
diff --git a/projects/PCSegSAM2/grounding_dino/groundingdino/util/box_ops.py b/projects/PCSegSAM2/grounding_dino/groundingdino/util/box_ops.py
new file mode 100644
index 00000000..781068d2
--- /dev/null
+++ b/projects/PCSegSAM2/grounding_dino/groundingdino/util/box_ops.py
@@ -0,0 +1,140 @@
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+"""
+Utilities for bounding box manipulation and GIoU.
+"""
+import torch
+from torchvision.ops.boxes import box_area
+
+
+def box_cxcywh_to_xyxy(x):
+ x_c, y_c, w, h = x.unbind(-1)
+ b = [(x_c - 0.5 * w), (y_c - 0.5 * h), (x_c + 0.5 * w), (y_c + 0.5 * h)]
+ return torch.stack(b, dim=-1)
+
+
+def box_xyxy_to_cxcywh(x):
+ x0, y0, x1, y1 = x.unbind(-1)
+ b = [(x0 + x1) / 2, (y0 + y1) / 2, (x1 - x0), (y1 - y0)]
+ return torch.stack(b, dim=-1)
+
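+# Round-trip sketch (illustrative): the two converters are inverses up to
+# floating-point error:
+#
+#   b = torch.tensor([[10.0, 20.0, 30.0, 60.0]])      # xyxy
+#   assert torch.allclose(box_cxcywh_to_xyxy(box_xyxy_to_cxcywh(b)), b)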
+
+# modified from torchvision to also return the union
+def box_iou(boxes1, boxes2):
+ area1 = box_area(boxes1)
+ area2 = box_area(boxes2)
+
+ lt = torch.max(boxes1[:, None, :2], boxes2[:, :2]) # [N,M,2]
+ rb = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) # [N,M,2]
+
+ wh = (rb - lt).clamp(min=0) # [N,M,2]
+ inter = wh[:, :, 0] * wh[:, :, 1] # [N,M]
+
+ union = area1[:, None] + area2 - inter
+
+ iou = inter / (union + 1e-6)
+ return iou, union
+
+
+def generalized_box_iou(boxes1, boxes2):
+ """
+ Generalized IoU from https://giou.stanford.edu/
+
+ The boxes should be in [x0, y0, x1, y1] format
+
+ Returns a [N, M] pairwise matrix, where N = len(boxes1)
+ and M = len(boxes2)
+ """
+    # degenerate boxes give inf / nan results, so do an early check
+    assert (boxes1[:, 2:] >= boxes1[:, :2]).all()
+    assert (boxes2[:, 2:] >= boxes2[:, :2]).all()
+ iou, union = box_iou(boxes1, boxes2)
+
+ lt = torch.min(boxes1[:, None, :2], boxes2[:, :2])
+ rb = torch.max(boxes1[:, None, 2:], boxes2[:, 2:])
+
+ wh = (rb - lt).clamp(min=0) # [N,M,2]
+ area = wh[:, :, 0] * wh[:, :, 1]
+
+ return iou - (area - union) / (area + 1e-6)
+
+
+# modified from torchvision to also return the union
+def box_iou_pairwise(boxes1, boxes2):
+ area1 = box_area(boxes1)
+ area2 = box_area(boxes2)
+
+ lt = torch.max(boxes1[:, :2], boxes2[:, :2]) # [N,2]
+ rb = torch.min(boxes1[:, 2:], boxes2[:, 2:]) # [N,2]
+
+ wh = (rb - lt).clamp(min=0) # [N,2]
+ inter = wh[:, 0] * wh[:, 1] # [N]
+
+ union = area1 + area2 - inter
+
+ iou = inter / union
+ return iou, union
+
+
+def generalized_box_iou_pairwise(boxes1, boxes2):
+ """
+ Generalized IoU from https://giou.stanford.edu/
+
+    Input:
+        - boxes1, boxes2: N,4
+    Output:
+        - giou: N
+    """
+    # degenerate boxes give inf / nan results, so do an early check
+ assert (boxes1[:, 2:] >= boxes1[:, :2]).all()
+ assert (boxes2[:, 2:] >= boxes2[:, :2]).all()
+ assert boxes1.shape == boxes2.shape
+    iou, union = box_iou_pairwise(boxes1, boxes2)  # N
+
+ lt = torch.min(boxes1[:, :2], boxes2[:, :2])
+ rb = torch.max(boxes1[:, 2:], boxes2[:, 2:])
+
+ wh = (rb - lt).clamp(min=0) # [N,2]
+ area = wh[:, 0] * wh[:, 1]
+
+ return iou - (area - union) / area
+
+
+def masks_to_boxes(masks):
+ """Compute the bounding boxes around the provided masks
+
+ The masks should be in format [N, H, W] where N is the number of masks, (H, W) are the spatial dimensions.
+
+    Returns a [N, 4] tensor, with the boxes in xyxy format
+ """
+ if masks.numel() == 0:
+ return torch.zeros((0, 4), device=masks.device)
+
+ h, w = masks.shape[-2:]
+
+ y = torch.arange(0, h, dtype=torch.float)
+ x = torch.arange(0, w, dtype=torch.float)
+ y, x = torch.meshgrid(y, x)
+
+ x_mask = masks * x.unsqueeze(0)
+ x_max = x_mask.flatten(1).max(-1)[0]
+ x_min = x_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0]
+
+ y_mask = masks * y.unsqueeze(0)
+ y_max = y_mask.flatten(1).max(-1)[0]
+ y_min = y_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0]
+
+ return torch.stack([x_min, y_min, x_max, y_max], 1)
+
+
+if __name__ == "__main__":
+ x = torch.rand(5, 4)
+ y = torch.rand(3, 4)
+ iou, union = box_iou(x, y)
+ import ipdb
+
+ ipdb.set_trace()
diff --git a/projects/PCSegSAM2/grounding_dino/groundingdino/util/get_tokenlizer.py b/projects/PCSegSAM2/grounding_dino/groundingdino/util/get_tokenlizer.py
new file mode 100644
index 00000000..54b7285c
--- /dev/null
+++ b/projects/PCSegSAM2/grounding_dino/groundingdino/util/get_tokenlizer.py
@@ -0,0 +1,31 @@
+import os
+
+from transformers import AutoTokenizer, BertModel, BertTokenizer, RobertaModel, RobertaTokenizerFast
+
+
+def get_tokenlizer(text_encoder_type):
+    # NOTE: the spelling "tokenlizer" matches the upstream file/function name
+    # and is kept for import compatibility.
+    if not isinstance(text_encoder_type, str):
+ if hasattr(text_encoder_type, "text_encoder_type"):
+ text_encoder_type = text_encoder_type.text_encoder_type
+ elif text_encoder_type.get("text_encoder_type", False):
+ text_encoder_type = text_encoder_type.get("text_encoder_type")
+ elif os.path.isdir(text_encoder_type) and os.path.exists(text_encoder_type):
+ pass
+ else:
+ raise ValueError("Unknown type of text_encoder_type: {}".format(type(text_encoder_type)))
+ print("final text_encoder_type: {}".format(text_encoder_type))
+
+ tokenizer = AutoTokenizer.from_pretrained(text_encoder_type)
+ return tokenizer
+
+
+def get_pretrained_language_model(text_encoder_type):
+ if text_encoder_type == "bert-base-uncased" or (
+ os.path.isdir(text_encoder_type) and os.path.exists(text_encoder_type)
+ ):
+ return BertModel.from_pretrained(text_encoder_type)
+ if text_encoder_type == "roberta-base":
+ return RobertaModel.from_pretrained(text_encoder_type)
+
+ raise ValueError("Unknown text_encoder_type {}".format(text_encoder_type))
diff --git a/projects/PCSegSAM2/grounding_dino/groundingdino/util/inference.py b/projects/PCSegSAM2/grounding_dino/groundingdino/util/inference.py
new file mode 100644
index 00000000..c840ca2d
--- /dev/null
+++ b/projects/PCSegSAM2/grounding_dino/groundingdino/util/inference.py
@@ -0,0 +1,244 @@
+import bisect
+from typing import List, Tuple
+
+import cv2
+import groundingdino.datasets.transforms as T
+import numpy as np
+import supervision as sv
+import torch
+from groundingdino.models import build_model
+from groundingdino.util.misc import clean_state_dict
+from groundingdino.util.slconfig import SLConfig
+from groundingdino.util.utils import get_phrases_from_posmap
+from PIL import Image
+from torchvision.ops import box_convert
+
+# ----------------------------------------------------------------------------------------------------------------------
+# OLD API
+# ----------------------------------------------------------------------------------------------------------------------
+
+
+def preprocess_caption(caption: str) -> str:
+ result = caption.lower().strip()
+ if result.endswith("."):
+ return result
+ return result + "."
+
+
+def load_model(model_config_path: str, model_checkpoint_path: str, device: str = "cuda"):
+ args = SLConfig.fromfile(model_config_path)
+ args.device = device
+ model = build_model(args)
+ checkpoint = torch.load(model_checkpoint_path, map_location="cpu")
+ model.load_state_dict(clean_state_dict(checkpoint["model"]), strict=False)
+ model.eval()
+ return model
+
+
+def load_image(image_path: str) -> Tuple[np.ndarray, torch.Tensor]:
+ transform = T.Compose(
+ [
+ T.RandomResize([800], max_size=1333),
+ T.ToTensor(),
+ T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
+ ]
+ )
+ image_source = Image.open(image_path).convert("RGB")
+ image = np.asarray(image_source)
+ image_transformed, _ = transform(image_source, None)
+ return image, image_transformed
+
+
+def predict(
+ model,
+ image: torch.Tensor,
+ caption: str,
+ box_threshold: float,
+ text_threshold: float,
+ device: str = "cuda",
+ remove_combined: bool = False,
+) -> Tuple[torch.Tensor, torch.Tensor, List[str]]:
+ caption = preprocess_caption(caption=caption)
+
+ model = model.to(device)
+ image = image.to(device)
+
+ with torch.no_grad():
+ outputs = model(image[None], captions=[caption])
+
+ prediction_logits = outputs["pred_logits"].cpu().sigmoid()[0] # prediction_logits.shape = (nq, 256)
+ prediction_boxes = outputs["pred_boxes"].cpu()[0] # prediction_boxes.shape = (nq, 4)
+
+ mask = prediction_logits.max(dim=1)[0] > box_threshold
+ logits = prediction_logits[mask] # logits.shape = (n, 256)
+ boxes = prediction_boxes[mask] # boxes.shape = (n, 4)
+
+ tokenizer = model.tokenizer
+ tokenized = tokenizer(caption)
+
+ if remove_combined:
+ sep_idx = [i for i in range(len(tokenized["input_ids"])) if tokenized["input_ids"][i] in [101, 102, 1012]]
+
+ phrases = []
+ for logit in logits:
+ max_idx = logit.argmax()
+ insert_idx = bisect.bisect_left(sep_idx, max_idx)
+ right_idx = sep_idx[insert_idx]
+ left_idx = sep_idx[insert_idx - 1]
+ phrases.append(
+ get_phrases_from_posmap(logit > text_threshold, tokenized, tokenizer, left_idx, right_idx).replace(
+ ".", ""
+ )
+ )
+ else:
+ phrases = [
+ get_phrases_from_posmap(logit > text_threshold, tokenized, tokenizer).replace(".", "") for logit in logits
+ ]
+
+ return boxes, logits.max(dim=1)[0], phrases
+
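+# Illustrative end-to-end sketch of the OLD API (paths are placeholders):
+#
+#   model = load_model("cfg.py", "weights.pth")
+#   image_source, image = load_image("demo.jpg")
+#   boxes, logits, phrases = predict(
+#       model, image, caption="car. person.", box_threshold=0.35, text_threshold=0.25
+#   )
+#   frame = annotate(image_source, boxes, logits, phrases)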
+
+def annotate(image_source: np.ndarray, boxes: torch.Tensor, logits: torch.Tensor, phrases: List[str]) -> np.ndarray:
+ """
+ This function annotates an image with bounding boxes and labels.
+
+ Parameters:
+ image_source (np.ndarray): The source image to be annotated.
+ boxes (torch.Tensor): A tensor containing bounding box coordinates.
+ logits (torch.Tensor): A tensor containing confidence scores for each bounding box.
+ phrases (List[str]): A list of labels for each bounding box.
+
+ Returns:
+ np.ndarray: The annotated image.
+ """
+ h, w, _ = image_source.shape
+ boxes = boxes * torch.Tensor([w, h, w, h])
+ xyxy = box_convert(boxes=boxes, in_fmt="cxcywh", out_fmt="xyxy").numpy()
+ detections = sv.Detections(xyxy=xyxy)
+
+ labels = [f"{phrase} {logit:.2f}" for phrase, logit in zip(phrases, logits)]
+
+ bbox_annotator = sv.BoxAnnotator(color_lookup=sv.ColorLookup.INDEX)
+ label_annotator = sv.LabelAnnotator(color_lookup=sv.ColorLookup.INDEX)
+ annotated_frame = cv2.cvtColor(image_source, cv2.COLOR_RGB2BGR)
+ annotated_frame = bbox_annotator.annotate(scene=annotated_frame, detections=detections)
+ annotated_frame = label_annotator.annotate(scene=annotated_frame, detections=detections, labels=labels)
+ return annotated_frame
+
+
+# ----------------------------------------------------------------------------------------------------------------------
+# NEW API
+# ----------------------------------------------------------------------------------------------------------------------
+
+
+class Model:
+
+ def __init__(self, model_config_path: str, model_checkpoint_path: str, device: str = "cuda"):
+ self.model = load_model(
+ model_config_path=model_config_path, model_checkpoint_path=model_checkpoint_path, device=device
+ ).to(device)
+ self.device = device
+
+ def predict_with_caption(
+ self, image: np.ndarray, caption: str, box_threshold: float = 0.35, text_threshold: float = 0.25
+ ) -> Tuple[sv.Detections, List[str]]:
+ """
+ import cv2
+
+ image = cv2.imread(IMAGE_PATH)
+
+ model = Model(model_config_path=CONFIG_PATH, model_checkpoint_path=WEIGHTS_PATH)
+ detections, labels = model.predict_with_caption(
+ image=image,
+ caption=caption,
+ box_threshold=BOX_THRESHOLD,
+ text_threshold=TEXT_THRESHOLD
+ )
+
+        import supervision as sv
+
+        box_annotator = sv.BoxAnnotator()
+        label_annotator = sv.LabelAnnotator()
+        annotated_image = box_annotator.annotate(scene=image, detections=detections)
+        annotated_image = label_annotator.annotate(scene=annotated_image, detections=detections, labels=labels)
+ """
+ processed_image = Model.preprocess_image(image_bgr=image).to(self.device)
+ boxes, logits, phrases = predict(
+ model=self.model,
+ image=processed_image,
+ caption=caption,
+ box_threshold=box_threshold,
+ text_threshold=text_threshold,
+ device=self.device,
+ )
+ source_h, source_w, _ = image.shape
+ detections = Model.post_process_result(source_h=source_h, source_w=source_w, boxes=boxes, logits=logits)
+ return detections, phrases
+
+ def predict_with_classes(
+ self, image: np.ndarray, classes: List[str], box_threshold: float, text_threshold: float
+ ) -> sv.Detections:
+ """
+ import cv2
+
+ image = cv2.imread(IMAGE_PATH)
+
+ model = Model(model_config_path=CONFIG_PATH, model_checkpoint_path=WEIGHTS_PATH)
+ detections = model.predict_with_classes(
+ image=image,
+ classes=CLASSES,
+ box_threshold=BOX_THRESHOLD,
+ text_threshold=TEXT_THRESHOLD
+ )
+
+
+ import supervision as sv
+
+ box_annotator = sv.BoxAnnotator()
+ annotated_image = box_annotator.annotate(scene=image, detections=detections)
+ """
+ caption = ". ".join(classes)
+ processed_image = Model.preprocess_image(image_bgr=image).to(self.device)
+ boxes, logits, phrases = predict(
+ model=self.model,
+ image=processed_image,
+ caption=caption,
+ box_threshold=box_threshold,
+ text_threshold=text_threshold,
+ device=self.device,
+ )
+ source_h, source_w, _ = image.shape
+ detections = Model.post_process_result(source_h=source_h, source_w=source_w, boxes=boxes, logits=logits)
+ class_id = Model.phrases2classes(phrases=phrases, classes=classes)
+ detections.class_id = class_id
+ return detections
+
+ @staticmethod
+ def preprocess_image(image_bgr: np.ndarray) -> torch.Tensor:
+ transform = T.Compose(
+ [
+ T.RandomResize([800], max_size=1333),
+ T.ToTensor(),
+ T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
+ ]
+ )
+ image_pillow = Image.fromarray(cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB))
+ image_transformed, _ = transform(image_pillow, None)
+ return image_transformed
+
+ @staticmethod
+ def post_process_result(source_h: int, source_w: int, boxes: torch.Tensor, logits: torch.Tensor) -> sv.Detections:
+ boxes = boxes * torch.Tensor([source_w, source_h, source_w, source_h])
+ xyxy = box_convert(boxes=boxes, in_fmt="cxcywh", out_fmt="xyxy").numpy()
+ confidence = logits.numpy()
+ return sv.Detections(xyxy=xyxy, confidence=confidence)
+
+ @staticmethod
+ def phrases2classes(phrases: List[str], classes: List[str]) -> np.ndarray:
+ class_ids = []
+ for phrase in phrases:
+ for class_ in classes:
+ if class_ in phrase:
+ class_ids.append(classes.index(class_))
+ break
+ else:
+ class_ids.append(None)
+ return np.array(class_ids)
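+
+# Illustrative sketch (not upstream code): phrases2classes maps each predicted
+# phrase to the first class name it contains, or None when nothing matches:
+#
+#   Model.phrases2classes(["red car", "person walking", "dog"],
+#                         classes=["car", "person"])
+#   # -> array([0, 1, None], dtype=object)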
diff --git a/projects/PCSegSAM2/grounding_dino/groundingdino/util/logger.py b/projects/PCSegSAM2/grounding_dino/groundingdino/util/logger.py
new file mode 100644
index 00000000..679e0f59
--- /dev/null
+++ b/projects/PCSegSAM2/grounding_dino/groundingdino/util/logger.py
@@ -0,0 +1,91 @@
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+import functools
+import logging
+import os
+import sys
+
+from termcolor import colored
+
+
+class _ColorfulFormatter(logging.Formatter):
+ def __init__(self, *args, **kwargs):
+ self._root_name = kwargs.pop("root_name") + "."
+ self._abbrev_name = kwargs.pop("abbrev_name", "")
+ if len(self._abbrev_name):
+ self._abbrev_name = self._abbrev_name + "."
+ super(_ColorfulFormatter, self).__init__(*args, **kwargs)
+
+ def formatMessage(self, record):
+ record.name = record.name.replace(self._root_name, self._abbrev_name)
+ log = super(_ColorfulFormatter, self).formatMessage(record)
+ if record.levelno == logging.WARNING:
+ prefix = colored("WARNING", "red", attrs=["blink"])
+ elif record.levelno == logging.ERROR or record.levelno == logging.CRITICAL:
+ prefix = colored("ERROR", "red", attrs=["blink", "underline"])
+ else:
+ return log
+ return prefix + " " + log
+
+
+# so that calling setup_logger multiple times won't add many handlers
+@functools.lru_cache()
+def setup_logger(output=None, distributed_rank=0, *, color=True, name="imagenet", abbrev_name=None):
+ """
+    Initialize the logger (adapted from detectron2) and set its verbosity level to "DEBUG".
+
+ Args:
+ output (str): a file name or a directory to save log. If None, will not save log file.
+ If ends with ".txt" or ".log", assumed to be a file name.
+ Otherwise, logs will be saved to `output/log.txt`.
+ name (str): the root module name of this logger
+
+ Returns:
+ logging.Logger: a logger
+ """
+ logger = logging.getLogger(name)
+ logger.setLevel(logging.DEBUG)
+ logger.propagate = False
+
+ if abbrev_name is None:
+ abbrev_name = name
+
+ plain_formatter = logging.Formatter("[%(asctime)s.%(msecs)03d]: %(message)s", datefmt="%m/%d %H:%M:%S")
+ # stdout logging: master only
+ if distributed_rank == 0:
+ ch = logging.StreamHandler(stream=sys.stdout)
+ ch.setLevel(logging.DEBUG)
+ if color:
+ formatter = _ColorfulFormatter(
+ colored("[%(asctime)s.%(msecs)03d]: ", "green") + "%(message)s",
+ datefmt="%m/%d %H:%M:%S",
+ root_name=name,
+ abbrev_name=str(abbrev_name),
+ )
+ else:
+ formatter = plain_formatter
+ ch.setFormatter(formatter)
+ logger.addHandler(ch)
+
+ # file logging: all workers
+ if output is not None:
+ if output.endswith(".txt") or output.endswith(".log"):
+ filename = output
+ else:
+ filename = os.path.join(output, "log.txt")
+ if distributed_rank > 0:
+ filename = filename + f".rank{distributed_rank}"
+ os.makedirs(os.path.dirname(filename), exist_ok=True)
+
+ fh = logging.StreamHandler(_cached_log_stream(filename))
+ fh.setLevel(logging.DEBUG)
+ fh.setFormatter(plain_formatter)
+ logger.addHandler(fh)
+
+ return logger
+
+
+# cache the opened file object, so that different calls to `setup_logger`
+# with the same file name can safely write to the same file.
+@functools.lru_cache(maxsize=None)
+def _cached_log_stream(filename):
+ return open(filename, "a")
diff --git a/projects/PCSegSAM2/grounding_dino/groundingdino/util/misc.py b/projects/PCSegSAM2/grounding_dino/groundingdino/util/misc.py
new file mode 100644
index 00000000..e82b4b9a
--- /dev/null
+++ b/projects/PCSegSAM2/grounding_dino/groundingdino/util/misc.py
@@ -0,0 +1,703 @@
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+"""
+Misc functions, including distributed helpers.
+
+Mostly copy-paste from torchvision references.
+"""
+import colorsys
+import datetime
+import functools
+import io
+import json
+import os
+import pickle
+import subprocess
+import time
+from collections import OrderedDict, defaultdict, deque
+from typing import List, Optional
+
+import numpy as np
+import torch
+import torch.distributed as dist
+
+# needed due to empty tensor bug in pytorch and torchvision 0.5
+import torchvision
+from torch import Tensor
+
+__torchvision_need_compat_flag = float(torchvision.__version__.split(".")[1]) < 7
+if __torchvision_need_compat_flag:
+ from torchvision.ops import _new_empty_tensor
+ from torchvision.ops.misc import _output_size
+
+
+class SmoothedValue(object):
+ """Track a series of values and provide access to smoothed values over a
+ window or the global series average.
+ """
+
+ def __init__(self, window_size=20, fmt=None):
+ if fmt is None:
+ fmt = "{median:.4f} ({global_avg:.4f})"
+ self.deque = deque(maxlen=window_size)
+ self.total = 0.0
+ self.count = 0
+ self.fmt = fmt
+
+ def update(self, value, n=1):
+ self.deque.append(value)
+ self.count += n
+ self.total += value * n
+
+ def synchronize_between_processes(self):
+ """
+ Warning: does not synchronize the deque!
+ """
+ if not is_dist_avail_and_initialized():
+ return
+ t = torch.tensor([self.count, self.total], dtype=torch.float64, device="cuda")
+ dist.barrier()
+ dist.all_reduce(t)
+ t = t.tolist()
+ self.count = int(t[0])
+ self.total = t[1]
+
+ @property
+ def median(self):
+ d = torch.tensor(list(self.deque))
+ if d.shape[0] == 0:
+ return 0
+ return d.median().item()
+
+ @property
+ def avg(self):
+ d = torch.tensor(list(self.deque), dtype=torch.float32)
+ return d.mean().item()
+
+ @property
+ def global_avg(self):
+ if os.environ.get("SHILONG_AMP", None) == "1":
+ eps = 1e-4
+ else:
+ eps = 1e-6
+ return self.total / (self.count + eps)
+
+ @property
+ def max(self):
+ return max(self.deque)
+
+ @property
+ def value(self):
+ return self.deque[-1]
+
+ def __str__(self):
+ return self.fmt.format(
+ median=self.median,
+ avg=self.avg,
+ global_avg=self.global_avg,
+ max=self.max,
+ value=self.value,
+ )
+
+
+@functools.lru_cache()
+def _get_global_gloo_group():
+ """
+ Return a process group based on gloo backend, containing all the ranks
+ The result is cached.
+ """
+
+ if dist.get_backend() == "nccl":
+ return dist.new_group(backend="gloo")
+
+ return dist.group.WORLD
+
+
+def all_gather_cpu(data):
+ """
+ Run all_gather on arbitrary picklable data (not necessarily tensors)
+ Args:
+ data: any picklable object
+ Returns:
+ list[data]: list of data gathered from each rank
+ """
+
+ world_size = get_world_size()
+ if world_size == 1:
+ return [data]
+
+ cpu_group = _get_global_gloo_group()
+
+ buffer = io.BytesIO()
+ torch.save(data, buffer)
+ data_view = buffer.getbuffer()
+ device = "cuda" if cpu_group is None else "cpu"
+ tensor = torch.ByteTensor(data_view).to(device)
+
+ # obtain Tensor size of each rank
+ local_size = torch.tensor([tensor.numel()], device=device, dtype=torch.long)
+ size_list = [torch.tensor([0], device=device, dtype=torch.long) for _ in range(world_size)]
+ if cpu_group is None:
+ dist.all_gather(size_list, local_size)
+ else:
+ print("gathering on cpu")
+ dist.all_gather(size_list, local_size, group=cpu_group)
+ size_list = [int(size.item()) for size in size_list]
+ max_size = max(size_list)
+ assert isinstance(local_size.item(), int)
+ local_size = int(local_size.item())
+
+ # receiving Tensor from all ranks
+ # we pad the tensor because torch all_gather does not support
+ # gathering tensors of different shapes
+ tensor_list = []
+ for _ in size_list:
+ tensor_list.append(torch.empty((max_size,), dtype=torch.uint8, device=device))
+ if local_size != max_size:
+ padding = torch.empty(size=(max_size - local_size,), dtype=torch.uint8, device=device)
+ tensor = torch.cat((tensor, padding), dim=0)
+ if cpu_group is None:
+ dist.all_gather(tensor_list, tensor)
+ else:
+ dist.all_gather(tensor_list, tensor, group=cpu_group)
+
+ data_list = []
+ for size, tensor in zip(size_list, tensor_list):
+ tensor = torch.split(tensor, [size, max_size - size], dim=0)[0]
+ buffer = io.BytesIO(tensor.cpu().numpy())
+ obj = torch.load(buffer)
+ data_list.append(obj)
+
+ return data_list
+
+
+def all_gather(data):
+ """
+ Run all_gather on arbitrary picklable data (not necessarily tensors)
+ Args:
+ data: any picklable object
+ Returns:
+ list[data]: list of data gathered from each rank
+ """
+
+ if os.getenv("CPU_REDUCE") == "1":
+ return all_gather_cpu(data)
+
+ world_size = get_world_size()
+ if world_size == 1:
+ return [data]
+
+ # serialized to a Tensor
+ buffer = pickle.dumps(data)
+ storage = torch.ByteStorage.from_buffer(buffer)
+ tensor = torch.ByteTensor(storage).to("cuda")
+
+ # obtain Tensor size of each rank
+ local_size = torch.tensor([tensor.numel()], device="cuda")
+ size_list = [torch.tensor([0], device="cuda") for _ in range(world_size)]
+ dist.all_gather(size_list, local_size)
+ size_list = [int(size.item()) for size in size_list]
+ max_size = max(size_list)
+
+ # receiving Tensor from all ranks
+ # we pad the tensor because torch all_gather does not support
+ # gathering tensors of different shapes
+ tensor_list = []
+ for _ in size_list:
+ tensor_list.append(torch.empty((max_size,), dtype=torch.uint8, device="cuda"))
+ if local_size != max_size:
+ padding = torch.empty(size=(max_size - local_size,), dtype=torch.uint8, device="cuda")
+ tensor = torch.cat((tensor, padding), dim=0)
+ dist.all_gather(tensor_list, tensor)
+
+ data_list = []
+ for size, tensor in zip(size_list, tensor_list):
+ buffer = tensor.cpu().numpy().tobytes()[:size]
+ data_list.append(pickle.loads(buffer))
+
+ return data_list
+
+
+def reduce_dict(input_dict, average=True):
+ """
+ Args:
+ input_dict (dict): all the values will be reduced
+ average (bool): whether to do average or sum
+ Reduce the values in the dictionary from all processes so that all processes
+ have the averaged results. Returns a dict with the same fields as
+ input_dict, after reduction.
+ """
+ world_size = get_world_size()
+ if world_size < 2:
+ return input_dict
+ with torch.no_grad():
+ names = []
+ values = []
+ # sort the keys so that they are consistent across processes
+ for k in sorted(input_dict.keys()):
+ names.append(k)
+ values.append(input_dict[k])
+ values = torch.stack(values, dim=0)
+ dist.all_reduce(values)
+ if average:
+ values /= world_size
+ reduced_dict = {k: v for k, v in zip(names, values)}
+ return reduced_dict
+
+
+class MetricLogger(object):
+ def __init__(self, delimiter="\t"):
+ self.meters = defaultdict(SmoothedValue)
+ self.delimiter = delimiter
+
+ def update(self, **kwargs):
+ for k, v in kwargs.items():
+ if isinstance(v, torch.Tensor):
+ v = v.item()
+ assert isinstance(v, (float, int))
+ self.meters[k].update(v)
+
+ def __getattr__(self, attr):
+ if attr in self.meters:
+ return self.meters[attr]
+ if attr in self.__dict__:
+ return self.__dict__[attr]
+ raise AttributeError("'{}' object has no attribute '{}'".format(type(self).__name__, attr))
+
+ def __str__(self):
+ loss_str = []
+ for name, meter in self.meters.items():
+ if meter.count > 0:
+ loss_str.append("{}: {}".format(name, str(meter)))
+ return self.delimiter.join(loss_str)
+
+ def synchronize_between_processes(self):
+ for meter in self.meters.values():
+ meter.synchronize_between_processes()
+
+ def add_meter(self, name, meter):
+ self.meters[name] = meter
+
+ def log_every(self, iterable, print_freq, header=None, logger=None):
+ if logger is None:
+ print_func = print
+ else:
+ print_func = logger.info
+
+ i = 0
+ if not header:
+ header = ""
+ start_time = time.time()
+ end = time.time()
+ iter_time = SmoothedValue(fmt="{avg:.4f}")
+ data_time = SmoothedValue(fmt="{avg:.4f}")
+ space_fmt = ":" + str(len(str(len(iterable)))) + "d"
+ if torch.cuda.is_available():
+ log_msg = self.delimiter.join(
+ [
+ header,
+ "[{0" + space_fmt + "}/{1}]",
+ "eta: {eta}",
+ "{meters}",
+ "time: {time}",
+ "data: {data}",
+ "max mem: {memory:.0f}",
+ ]
+ )
+ else:
+ log_msg = self.delimiter.join(
+ [
+ header,
+ "[{0" + space_fmt + "}/{1}]",
+ "eta: {eta}",
+ "{meters}",
+ "time: {time}",
+ "data: {data}",
+ ]
+ )
+ MB = 1024.0 * 1024.0
+ for obj in iterable:
+ data_time.update(time.time() - end)
+ yield obj
+ iter_time.update(time.time() - end)
+ if i % print_freq == 0 or i == len(iterable) - 1:
+ eta_seconds = iter_time.global_avg * (len(iterable) - i)
+ eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
+ if torch.cuda.is_available():
+ print_func(
+ log_msg.format(
+ i,
+ len(iterable),
+ eta=eta_string,
+ meters=str(self),
+ time=str(iter_time),
+ data=str(data_time),
+ memory=torch.cuda.max_memory_allocated() / MB,
+ )
+ )
+ else:
+ print_func(
+ log_msg.format(
+ i,
+ len(iterable),
+ eta=eta_string,
+ meters=str(self),
+ time=str(iter_time),
+ data=str(data_time),
+ )
+ )
+ i += 1
+ end = time.time()
+ total_time = time.time() - start_time
+ total_time_str = str(datetime.timedelta(seconds=int(total_time)))
+ print_func("{} Total time: {} ({:.4f} s / it)".format(header, total_time_str, total_time / len(iterable)))
+
+
+def get_sha():
+ cwd = os.path.dirname(os.path.abspath(__file__))
+
+ def _run(command):
+ return subprocess.check_output(command, cwd=cwd).decode("ascii").strip()
+
+ sha = "N/A"
+ diff = "clean"
+ branch = "N/A"
+ try:
+ sha = _run(["git", "rev-parse", "HEAD"])
+ subprocess.check_output(["git", "diff"], cwd=cwd)
+ diff = _run(["git", "diff-index", "HEAD"])
+ diff = "has uncommited changes" if diff else "clean"
+ branch = _run(["git", "rev-parse", "--abbrev-ref", "HEAD"])
+ except Exception:
+ pass
+ message = f"sha: {sha}, status: {diff}, branch: {branch}"
+ return message
+
+
+def collate_fn(batch):
+ batch = list(zip(*batch))
+ batch[0] = nested_tensor_from_tensor_list(batch[0])
+ return tuple(batch)
+
+
+def _max_by_axis(the_list):
+ # type: (List[List[int]]) -> List[int]
+ maxes = the_list[0]
+ for sublist in the_list[1:]:
+ for index, item in enumerate(sublist):
+ maxes[index] = max(maxes[index], item)
+ return maxes
+
+
+class NestedTensor(object):
+ def __init__(self, tensors, mask: Optional[Tensor]):
+ self.tensors = tensors
+ self.mask = mask
+ if mask == "auto":
+ self.mask = torch.zeros_like(tensors).to(tensors.device)
+ if self.mask.dim() == 3:
+ self.mask = self.mask.sum(0).to(bool)
+ elif self.mask.dim() == 4:
+ self.mask = self.mask.sum(1).to(bool)
+ else:
+                raise ValueError(
+                    "tensors dim must be 3 or 4, but got {} (shape {})".format(self.tensors.dim(), self.tensors.shape)
+                )
+
+ def imgsize(self):
+ res = []
+ for i in range(self.tensors.shape[0]):
+ mask = self.mask[i]
+ maxH = (~mask).sum(0).max()
+ maxW = (~mask).sum(1).max()
+ res.append(torch.Tensor([maxH, maxW]))
+ return res
+
+ def to(self, device):
+ # type: (Device) -> NestedTensor # noqa
+ cast_tensor = self.tensors.to(device)
+ mask = self.mask
+ if mask is not None:
+ assert mask is not None
+ cast_mask = mask.to(device)
+ else:
+ cast_mask = None
+ return NestedTensor(cast_tensor, cast_mask)
+
+ def to_img_list_single(self, tensor, mask):
+ assert tensor.dim() == 3, "dim of tensor should be 3 but {}".format(tensor.dim())
+ maxH = (~mask).sum(0).max()
+ maxW = (~mask).sum(1).max()
+ img = tensor[:, :maxH, :maxW]
+ return img
+
+ def to_img_list(self):
+ """remove the padding and convert to img list
+
+ Returns:
+ [type]: [description]
+ """
+ if self.tensors.dim() == 3:
+ return self.to_img_list_single(self.tensors, self.mask)
+ else:
+ res = []
+ for i in range(self.tensors.shape[0]):
+ tensor_i = self.tensors[i]
+ mask_i = self.mask[i]
+ res.append(self.to_img_list_single(tensor_i, mask_i))
+ return res
+
+ @property
+ def device(self):
+ return self.tensors.device
+
+ def decompose(self):
+ return self.tensors, self.mask
+
+ def __repr__(self):
+ return str(self.tensors)
+
+ @property
+ def shape(self):
+ return {"tensors.shape": self.tensors.shape, "mask.shape": self.mask.shape}
+
+
+def nested_tensor_from_tensor_list(tensor_list: List[Tensor]):
+ # TODO make this more general
+ if tensor_list[0].ndim == 3:
+ if torchvision._is_tracing():
+ # nested_tensor_from_tensor_list() does not export well to ONNX
+ # call _onnx_nested_tensor_from_tensor_list() instead
+ return _onnx_nested_tensor_from_tensor_list(tensor_list)
+
+ # TODO make it support different-sized images
+ max_size = _max_by_axis([list(img.shape) for img in tensor_list])
+ # min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list]))
+ batch_shape = [len(tensor_list)] + max_size
+ b, c, h, w = batch_shape
+ dtype = tensor_list[0].dtype
+ device = tensor_list[0].device
+ tensor = torch.zeros(batch_shape, dtype=dtype, device=device)
+ mask = torch.ones((b, h, w), dtype=torch.bool, device=device)
+ for img, pad_img, m in zip(tensor_list, tensor, mask):
+ pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
+ m[: img.shape[1], : img.shape[2]] = False
+ else:
+ raise ValueError("not supported")
+ return NestedTensor(tensor, mask)
+
+
+# _onnx_nested_tensor_from_tensor_list() is an implementation of
+# nested_tensor_from_tensor_list() that is supported by ONNX tracing.
+@torch.jit.unused
+def _onnx_nested_tensor_from_tensor_list(tensor_list: List[Tensor]) -> NestedTensor:
+ max_size = []
+ for i in range(tensor_list[0].dim()):
+ max_size_i = torch.max(torch.stack([img.shape[i] for img in tensor_list]).to(torch.float32)).to(torch.int64)
+ max_size.append(max_size_i)
+ max_size = tuple(max_size)
+
+ # work around for
+ # pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
+ # m[: img.shape[1], :img.shape[2]] = False
+ # which is not yet supported in onnx
+ padded_imgs = []
+ padded_masks = []
+ for img in tensor_list:
+ padding = [(s1 - s2) for s1, s2 in zip(max_size, tuple(img.shape))]
+ padded_img = torch.nn.functional.pad(img, (0, padding[2], 0, padding[1], 0, padding[0]))
+ padded_imgs.append(padded_img)
+
+ m = torch.zeros_like(img[0], dtype=torch.int, device=img.device)
+ padded_mask = torch.nn.functional.pad(m, (0, padding[2], 0, padding[1]), "constant", 1)
+ padded_masks.append(padded_mask.to(torch.bool))
+
+ tensor = torch.stack(padded_imgs)
+ mask = torch.stack(padded_masks)
+
+ return NestedTensor(tensor, mask=mask)
+
+
+def setup_for_distributed(is_master):
+ """
+ This function disables printing when not in master process
+ """
+ import builtins as __builtin__
+
+ builtin_print = __builtin__.print
+
+ def print(*args, **kwargs):
+ force = kwargs.pop("force", False)
+ if is_master or force:
+ builtin_print(*args, **kwargs)
+
+ __builtin__.print = print
+
+
+def is_dist_avail_and_initialized():
+ if not dist.is_available():
+ return False
+ if not dist.is_initialized():
+ return False
+ return True
+
+
+def get_world_size():
+ if not is_dist_avail_and_initialized():
+ return 1
+ return dist.get_world_size()
+
+
+def get_rank():
+ if not is_dist_avail_and_initialized():
+ return 0
+ return dist.get_rank()
+
+
+def is_main_process():
+ return get_rank() == 0
+
+
+def save_on_master(*args, **kwargs):
+ if is_main_process():
+ torch.save(*args, **kwargs)
+
+
+def init_distributed_mode(args):
+ if "WORLD_SIZE" in os.environ and os.environ["WORLD_SIZE"] != "": # 'RANK' in os.environ and
+ args.rank = int(os.environ["RANK"])
+ args.world_size = int(os.environ["WORLD_SIZE"])
+ args.gpu = args.local_rank = int(os.environ["LOCAL_RANK"])
+
+ # launch by torch.distributed.launch
+ # Single node
+ # python -m torch.distributed.launch --nproc_per_node=8 main.py --world-size 1 --rank 0 ...
+ # Multi nodes
+ # python -m torch.distributed.launch --nproc_per_node=8 main.py --world-size 2 --rank 0 --dist-url 'tcp://IP_OF_NODE0:FREEPORT' ...
+ # python -m torch.distributed.launch --nproc_per_node=8 main.py --world-size 2 --rank 1 --dist-url 'tcp://IP_OF_NODE0:FREEPORT' ...
+ # args.rank = int(os.environ.get('OMPI_COMM_WORLD_RANK'))
+ # local_world_size = int(os.environ['GPU_PER_NODE_COUNT'])
+ # args.world_size = args.world_size * local_world_size
+ # args.gpu = args.local_rank = int(os.environ['LOCAL_RANK'])
+ # args.rank = args.rank * local_world_size + args.local_rank
+ print("world size: {}, rank: {}, local rank: {}".format(args.world_size, args.rank, args.local_rank))
+ print(json.dumps(dict(os.environ), indent=2))
+ elif "SLURM_PROCID" in os.environ:
+ args.rank = int(os.environ["SLURM_PROCID"])
+ args.gpu = args.local_rank = int(os.environ["SLURM_LOCALID"])
+ args.world_size = int(os.environ["SLURM_NPROCS"])
+
+ print(
+ "world size: {}, world rank: {}, local rank: {}, device_count: {}".format(
+ args.world_size, args.rank, args.local_rank, torch.cuda.device_count()
+ )
+ )
+ else:
+ print("Not using distributed mode")
+ args.distributed = False
+ args.world_size = 1
+ args.rank = 0
+ args.local_rank = 0
+ return
+
+ print("world_size:{} rank:{} local_rank:{}".format(args.world_size, args.rank, args.local_rank))
+ args.distributed = True
+ torch.cuda.set_device(args.local_rank)
+ args.dist_backend = "nccl"
+ print("| distributed init (rank {}): {}".format(args.rank, args.dist_url), flush=True)
+
+ torch.distributed.init_process_group(
+ backend=args.dist_backend,
+ world_size=args.world_size,
+ rank=args.rank,
+ init_method=args.dist_url,
+ )
+
+ print("Before torch.distributed.barrier()")
+ torch.distributed.barrier()
+ print("End torch.distributed.barrier()")
+ setup_for_distributed(args.rank == 0)
+
+
+@torch.no_grad()
+def accuracy(output, target, topk=(1,)):
+ """Computes the precision@k for the specified values of k"""
+ if target.numel() == 0:
+ return [torch.zeros([], device=output.device)]
+ maxk = max(topk)
+ batch_size = target.size(0)
+
+ _, pred = output.topk(maxk, 1, True, True)
+ pred = pred.t()
+ correct = pred.eq(target.view(1, -1).expand_as(pred))
+
+ res = []
+ for k in topk:
+ correct_k = correct[:k].view(-1).float().sum(0)
+ res.append(correct_k.mul_(100.0 / batch_size))
+ return res
+
+
+@torch.no_grad()
+def accuracy_onehot(pred, gt):
+ """_summary_
+
+ Args:
+ pred (_type_): n, c
+ gt (_type_): n, c
+ """
+ tp = ((pred - gt).abs().sum(-1) < 1e-4).float().sum()
+ acc = tp / gt.shape[0] * 100
+ return acc
+
+
+def interpolate(input, size=None, scale_factor=None, mode="nearest", align_corners=None):
+ # type: (Tensor, Optional[List[int]], Optional[float], str, Optional[bool]) -> Tensor
+ """
+ Equivalent to nn.functional.interpolate, but with support for empty batch sizes.
+ This will eventually be supported natively by PyTorch, and this
+ class can go away.
+ """
+    if __torchvision_need_compat_flag:  # torchvision < 0.7 needs the manual empty-batch path
+ if input.numel() > 0:
+ return torch.nn.functional.interpolate(input, size, scale_factor, mode, align_corners)
+
+ output_shape = _output_size(2, input, size, scale_factor)
+ output_shape = list(input.shape[:-2]) + list(output_shape)
+ return _new_empty_tensor(input, output_shape)
+ else:
+ return torchvision.ops.misc.interpolate(input, size, scale_factor, mode, align_corners)
+
+
+class color_sys:
+ def __init__(self, num_colors) -> None:
+ self.num_colors = num_colors
+ colors = []
+ for i in np.arange(0.0, 360.0, 360.0 / num_colors):
+ hue = i / 360.0
+ lightness = (50 + np.random.rand() * 10) / 100.0
+ saturation = (90 + np.random.rand() * 10) / 100.0
+ colors.append(tuple([int(j * 255) for j in colorsys.hls_to_rgb(hue, lightness, saturation)]))
+ self.colors = colors
+
+ def __call__(self, idx):
+ return self.colors[idx]
+
+
+def inverse_sigmoid(x, eps=1e-3):
+ x = x.clamp(min=0, max=1)
+ x1 = x.clamp(min=eps)
+ x2 = (1 - x).clamp(min=eps)
+ return torch.log(x1 / x2)
+
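+# Numeric sketch (illustrative): inverse_sigmoid undoes torch.sigmoid within
+# the clamped range, e.g.
+#
+#   x = torch.tensor([0.1, 0.5, 0.9])
+#   torch.sigmoid(inverse_sigmoid(x))   # ~ tensor([0.1000, 0.5000, 0.9000])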
+
+def clean_state_dict(state_dict):
+ new_state_dict = OrderedDict()
+ for k, v in state_dict.items():
+ if k[:7] == "module.":
+ k = k[7:] # remove `module.`
+ new_state_dict[k] = v
+ return new_state_dict
diff --git a/projects/PCSegSAM2/grounding_dino/groundingdino/util/slconfig.py b/projects/PCSegSAM2/grounding_dino/groundingdino/util/slconfig.py
new file mode 100644
index 00000000..d0734318
--- /dev/null
+++ b/projects/PCSegSAM2/grounding_dino/groundingdino/util/slconfig.py
@@ -0,0 +1,419 @@
+# ==========================================================
+# Modified from mmcv
+# ==========================================================
+import ast
+import os
+import os.path as osp
+import shutil
+import sys
+import tempfile
+from argparse import Action
+from importlib import import_module
+
+from addict import Dict
+from yapf.yapflib.yapf_api import FormatCode
+
+BASE_KEY = "_base_"
+DELETE_KEY = "_delete_"
+RESERVED_KEYS = ["filename", "text", "pretty_text", "get", "dump", "merge_from_dict"]
+
+
+def check_file_exist(filename, msg_tmpl='file "{}" does not exist'):
+ if not osp.isfile(filename):
+ raise FileNotFoundError(msg_tmpl.format(filename))
+
+
+class ConfigDict(Dict):
+ def __missing__(self, name):
+ raise KeyError(name)
+
+ def __getattr__(self, name):
+ try:
+ value = super(ConfigDict, self).__getattr__(name)
+ except KeyError:
+ ex = AttributeError(f"'{self.__class__.__name__}' object has no " f"attribute '{name}'")
+ except Exception as e:
+ ex = e
+ else:
+ return value
+ raise ex
+
+
+class SLConfig(object):
+ """
+ config files.
+ only support .py file as config now.
+
+ ref: mmcv.utils.config
+
+ Example:
+ >>> cfg = Config(dict(a=1, b=dict(b1=[0, 1])))
+ >>> cfg.a
+ 1
+ >>> cfg.b
+ {'b1': [0, 1]}
+ >>> cfg.b.b1
+ [0, 1]
+ >>> cfg = Config.fromfile('tests/data/config/a.py')
+ >>> cfg.filename
+ "/home/kchen/projects/mmcv/tests/data/config/a.py"
+ >>> cfg.item4
+ 'test'
+ >>> cfg
+ "Config [path: /home/kchen/projects/mmcv/tests/data/config/a.py]: "
+ "{'item1': [1, 2], 'item2': {'a': 0}, 'item3': True, 'item4': 'test'}"
+ """
+
+ @staticmethod
+ def _validate_py_syntax(filename):
+ with open(filename) as f:
+ content = f.read()
+ try:
+ ast.parse(content)
+ except SyntaxError:
+ raise SyntaxError("There are syntax errors in config " f"file {filename}")
+
+ @staticmethod
+ def _file2dict(filename):
+ filename = osp.abspath(osp.expanduser(filename))
+ check_file_exist(filename)
+ if filename.lower().endswith(".py"):
+ with tempfile.TemporaryDirectory() as temp_config_dir:
+ temp_config_file = tempfile.NamedTemporaryFile(dir=temp_config_dir, suffix=".py")
+ temp_config_name = osp.basename(temp_config_file.name)
+ if os.name == "nt":
+ temp_config_file.close()
+ shutil.copyfile(filename, osp.join(temp_config_dir, temp_config_name))
+ temp_module_name = osp.splitext(temp_config_name)[0]
+ sys.path.insert(0, temp_config_dir)
+ SLConfig._validate_py_syntax(filename)
+ mod = import_module(temp_module_name)
+ sys.path.pop(0)
+ cfg_dict = {name: value for name, value in mod.__dict__.items() if not name.startswith("__")}
+ # delete imported module
+ del sys.modules[temp_module_name]
+ # close temp file
+ temp_config_file.close()
+ elif filename.lower().endswith((".yml", ".yaml", ".json")):
+ from .slio import slload
+
+ cfg_dict = slload(filename)
+ else:
+            raise IOError("Only py/yml/yaml/json types are supported now!")
+
+ cfg_text = filename + "\n"
+ with open(filename, "r") as f:
+ cfg_text += f.read()
+
+ # parse the base file
+ if BASE_KEY in cfg_dict:
+ cfg_dir = osp.dirname(filename)
+ base_filename = cfg_dict.pop(BASE_KEY)
+ base_filename = base_filename if isinstance(base_filename, list) else [base_filename]
+
+ cfg_dict_list = list()
+ cfg_text_list = list()
+ for f in base_filename:
+ _cfg_dict, _cfg_text = SLConfig._file2dict(osp.join(cfg_dir, f))
+ cfg_dict_list.append(_cfg_dict)
+ cfg_text_list.append(_cfg_text)
+
+ base_cfg_dict = dict()
+ for c in cfg_dict_list:
+ if len(base_cfg_dict.keys() & c.keys()) > 0:
+ raise KeyError("Duplicate key is not allowed among bases")
+                    # TODO Allow duplicate keys while warning the user
+ base_cfg_dict.update(c)
+
+ base_cfg_dict = SLConfig._merge_a_into_b(cfg_dict, base_cfg_dict)
+ cfg_dict = base_cfg_dict
+
+ # merge cfg_text
+ cfg_text_list.append(cfg_text)
+ cfg_text = "\n".join(cfg_text_list)
+
+ return cfg_dict, cfg_text
+
+ @staticmethod
+ def _merge_a_into_b(a, b):
+ """merge dict `a` into dict `b` (non-inplace).
+ values in `a` will overwrite `b`.
+ copy first to avoid inplace modification
+
+ Args:
+ a ([type]): [description]
+ b ([type]): [description]
+
+ Returns:
+ [dict]: [description]
+ """
+ # import ipdb; ipdb.set_trace()
+ if not isinstance(a, dict):
+ return a
+
+ b = b.copy()
+ for k, v in a.items():
+ if isinstance(v, dict) and k in b and not v.pop(DELETE_KEY, False):
+
+ if not isinstance(b[k], dict) and not isinstance(b[k], list):
+ raise TypeError(
+ f"{k}={v} in child config cannot inherit from base "
+ f"because {k} is a dict in the child config but is of "
+ f"type {type(b[k])} in base config. You may set "
+ f"`{DELETE_KEY}=True` to ignore the base config"
+ )
+ b[k] = SLConfig._merge_a_into_b(v, b[k])
+ elif isinstance(b, list):
+ try:
+ _ = int(k)
+                except ValueError:
+                    raise TypeError(f"b is a list, so index {k} should be an int, but got {type(k)}")
+ b[int(k)] = SLConfig._merge_a_into_b(v, b[int(k)])
+ else:
+ b[k] = v
+
+ return b
+
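+    # Illustrative sketch: child keys override base keys recursively, e.g.
+    #
+    #   base = {"model": {"depth": 50, "type": "ResNet"}}
+    #   child = {"model": {"depth": 101}}
+    #   SLConfig._merge_a_into_b(child, base)
+    #   # -> {"model": {"depth": 101, "type": "ResNet"}}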
+ @staticmethod
+ def fromfile(filename):
+ cfg_dict, cfg_text = SLConfig._file2dict(filename)
+ return SLConfig(cfg_dict, cfg_text=cfg_text, filename=filename)
+
+ def __init__(self, cfg_dict=None, cfg_text=None, filename=None):
+ if cfg_dict is None:
+ cfg_dict = dict()
+ elif not isinstance(cfg_dict, dict):
+ raise TypeError("cfg_dict must be a dict, but " f"got {type(cfg_dict)}")
+ for key in cfg_dict:
+ if key in RESERVED_KEYS:
+ raise KeyError(f"{key} is reserved for config file")
+
+ super(SLConfig, self).__setattr__("_cfg_dict", ConfigDict(cfg_dict))
+ super(SLConfig, self).__setattr__("_filename", filename)
+ if cfg_text:
+ text = cfg_text
+ elif filename:
+ with open(filename, "r") as f:
+ text = f.read()
+ else:
+ text = ""
+ super(SLConfig, self).__setattr__("_text", text)
+
+ @property
+ def filename(self):
+ return self._filename
+
+ @property
+ def text(self):
+ return self._text
+
+ @property
+ def pretty_text(self):
+
+ indent = 4
+
+ def _indent(s_, num_spaces):
+ s = s_.split("\n")
+ if len(s) == 1:
+ return s_
+ first = s.pop(0)
+ s = [(num_spaces * " ") + line for line in s]
+ s = "\n".join(s)
+ s = first + "\n" + s
+ return s
+
+ def _format_basic_types(k, v, use_mapping=False):
+ if isinstance(v, str):
+ v_str = f"'{v}'"
+ else:
+ v_str = str(v)
+
+ if use_mapping:
+ k_str = f"'{k}'" if isinstance(k, str) else str(k)
+ attr_str = f"{k_str}: {v_str}"
+ else:
+ attr_str = f"{str(k)}={v_str}"
+ attr_str = _indent(attr_str, indent)
+
+ return attr_str
+
+ def _format_list(k, v, use_mapping=False):
+ # check if all items in the list are dict
+ if all(isinstance(_, dict) for _ in v):
+ v_str = "[\n"
+ v_str += "\n".join(f"dict({_indent(_format_dict(v_), indent)})," for v_ in v).rstrip(",")
+ if use_mapping:
+ k_str = f"'{k}'" if isinstance(k, str) else str(k)
+ attr_str = f"{k_str}: {v_str}"
+ else:
+ attr_str = f"{str(k)}={v_str}"
+ attr_str = _indent(attr_str, indent) + "]"
+ else:
+ attr_str = _format_basic_types(k, v, use_mapping)
+ return attr_str
+
+ def _contain_invalid_identifier(dict_str):
+ contain_invalid_identifier = False
+ for key_name in dict_str:
+ contain_invalid_identifier |= not str(key_name).isidentifier()
+ return contain_invalid_identifier
+
+ def _format_dict(input_dict, outest_level=False):
+ r = ""
+ s = []
+
+ use_mapping = _contain_invalid_identifier(input_dict)
+ if use_mapping:
+ r += "{"
+ for idx, (k, v) in enumerate(input_dict.items()):
+ is_last = idx >= len(input_dict) - 1
+ end = "" if outest_level or is_last else ","
+ if isinstance(v, dict):
+ v_str = "\n" + _format_dict(v)
+ if use_mapping:
+ k_str = f"'{k}'" if isinstance(k, str) else str(k)
+ attr_str = f"{k_str}: dict({v_str}"
+ else:
+ attr_str = f"{str(k)}=dict({v_str}"
+ attr_str = _indent(attr_str, indent) + ")" + end
+ elif isinstance(v, list):
+ attr_str = _format_list(k, v, use_mapping) + end
+ else:
+ attr_str = _format_basic_types(k, v, use_mapping) + end
+
+ s.append(attr_str)
+ r += "\n".join(s)
+ if use_mapping:
+ r += "}"
+ return r
+
+ cfg_dict = self._cfg_dict.to_dict()
+ text = _format_dict(cfg_dict, outest_level=True)
+ # copied from setup.cfg
+ yapf_style = dict(
+ based_on_style="pep8",
+ blank_line_before_nested_class_or_def=True,
+ split_before_expression_after_opening_paren=True,
+ )
+ text, _ = FormatCode(text, style_config=yapf_style, verify=True)
+
+ return text
+
+ def __repr__(self):
+ return f"Config (path: {self.filename}): {self._cfg_dict.__repr__()}"
+
+ def __len__(self):
+ return len(self._cfg_dict)
+
+ def __getattr__(self, name):
+ return getattr(self._cfg_dict, name)
+
+ def __getitem__(self, name):
+ return self._cfg_dict.__getitem__(name)
+
+ def __setattr__(self, name, value):
+ if isinstance(value, dict):
+ value = ConfigDict(value)
+ self._cfg_dict.__setattr__(name, value)
+
+ def __setitem__(self, name, value):
+ if isinstance(value, dict):
+ value = ConfigDict(value)
+ self._cfg_dict.__setitem__(name, value)
+
+ def __iter__(self):
+ return iter(self._cfg_dict)
+
+ def dump(self, file=None):
+ if file is None:
+ return self.pretty_text
+ else:
+ with open(file, "w") as f:
+ f.write(self.pretty_text)
+
+ def merge_from_dict(self, options):
+ """Merge list into cfg_dict
+
+ Merge the dict parsed by MultipleKVAction into this cfg.
+
+ Examples:
+ >>> options = {'model.backbone.depth': 50,
+ ... 'model.backbone.with_cp':True}
+ >>> cfg = Config(dict(model=dict(backbone=dict(type='ResNet'))))
+ >>> cfg.merge_from_dict(options)
+ >>> cfg_dict = super(Config, self).__getattribute__('_cfg_dict')
+ >>> assert cfg_dict == dict(
+ ... model=dict(backbone=dict(depth=50, with_cp=True)))
+
+ Args:
+ options (dict): dict of configs to merge from.
+ """
+ option_cfg_dict = {}
+ for full_key, v in options.items():
+ d = option_cfg_dict
+ key_list = full_key.split(".")
+ for subkey in key_list[:-1]:
+ d.setdefault(subkey, ConfigDict())
+ d = d[subkey]
+ subkey = key_list[-1]
+ d[subkey] = v
+
+ cfg_dict = super(SLConfig, self).__getattribute__("_cfg_dict")
+ super(SLConfig, self).__setattr__("_cfg_dict", SLConfig._merge_a_into_b(option_cfg_dict, cfg_dict))
+
+ # for multiprocess
+ def __setstate__(self, state):
+ self.__init__(state)
+
+ def copy(self):
+ return SLConfig(self._cfg_dict.copy())
+
+ def deepcopy(self):
+ return SLConfig(self._cfg_dict.deepcopy())
+
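+
+# NOTE (editor sketch): a minimal usage example for SLConfig; `_demo_slconfig`
+# is a hypothetical helper added purely for documentation and is never called
+# by the library.
+def _demo_slconfig():
+    cfg = SLConfig(dict(model=dict(backbone=dict(type="ResNet", depth=18))))
+    # dotted keys are expanded into nested dicts before merging
+    cfg.merge_from_dict({"model.backbone.depth": 50})
+    assert cfg["model"]["backbone"]["depth"] == 50
+    assert cfg["model"]["backbone"]["type"] == "ResNet"
+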
+
+class DictAction(Action):
+ """
+    argparse action to split an argument into KEY=VALUE form
+    on the first "=" and append to a dictionary. List options should
+    be passed as comma-separated values, e.g. KEY=V1,V2,V3.
+ """
+
+ @staticmethod
+ def _parse_int_float_bool(val):
+ try:
+ return int(val)
+ except ValueError:
+ pass
+ try:
+ return float(val)
+ except ValueError:
+ pass
+ if val.lower() in ["true", "false"]:
+            return val.lower() == "true"
+ if val.lower() in ["none", "null"]:
+ return None
+ return val
+
+ def __call__(self, parser, namespace, values, option_string=None):
+ options = {}
+ for kv in values:
+ key, val = kv.split("=", maxsplit=1)
+ val = [self._parse_int_float_bool(v) for v in val.split(",")]
+ if len(val) == 1:
+ val = val[0]
+ options[key] = val
+ setattr(namespace, self.dest, options)
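+
+
+# NOTE (editor sketch): hypothetical argparse wiring for DictAction, shown for
+# documentation only; `_demo_dict_action` is never called by the library.
+def _demo_dict_action():
+    import argparse
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--cfg-options", nargs="+", action=DictAction)
+    args = parser.parse_args(["--cfg-options", "lr=0.1", "gpus=0,1", "amp=true"])
+    # values are parsed to int/float/bool/None; comma lists become Python lists
+    assert args.cfg_options == {"lr": 0.1, "gpus": [0, 1], "amp": True}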
diff --git a/projects/PCSegSAM2/grounding_dino/groundingdino/util/slio.py b/projects/PCSegSAM2/grounding_dino/groundingdino/util/slio.py
new file mode 100644
index 00000000..f4923dd1
--- /dev/null
+++ b/projects/PCSegSAM2/grounding_dino/groundingdino/util/slio.py
@@ -0,0 +1,178 @@
+# ==========================================================
+# Modified from mmcv
+# ==========================================================
+
+import json
+import pickle
+from abc import ABCMeta, abstractmethod
+from pathlib import Path
+
+import yaml
+
+try:
+ from yaml import CDumper as Dumper
+ from yaml import CLoader as Loader
+except ImportError:
+ from yaml import Dumper, Loader
+
+
+# ===========================
+# Register handlers
+# ===========================
+
+
+class BaseFileHandler(metaclass=ABCMeta):
+ @abstractmethod
+ def load_from_fileobj(self, file, **kwargs):
+ pass
+
+ @abstractmethod
+ def dump_to_fileobj(self, obj, file, **kwargs):
+ pass
+
+ @abstractmethod
+ def dump_to_str(self, obj, **kwargs):
+ pass
+
+ def load_from_path(self, filepath, mode="r", **kwargs):
+ with open(filepath, mode) as f:
+ return self.load_from_fileobj(f, **kwargs)
+
+ def dump_to_path(self, obj, filepath, mode="w", **kwargs):
+ with open(filepath, mode) as f:
+ self.dump_to_fileobj(obj, f, **kwargs)
+
+
+class JsonHandler(BaseFileHandler):
+ def load_from_fileobj(self, file):
+ return json.load(file)
+
+ def dump_to_fileobj(self, obj, file, **kwargs):
+ json.dump(obj, file, **kwargs)
+
+ def dump_to_str(self, obj, **kwargs):
+ return json.dumps(obj, **kwargs)
+
+
+class PickleHandler(BaseFileHandler):
+ def load_from_fileobj(self, file, **kwargs):
+ return pickle.load(file, **kwargs)
+
+ def load_from_path(self, filepath, **kwargs):
+ return super(PickleHandler, self).load_from_path(filepath, mode="rb", **kwargs)
+
+ def dump_to_str(self, obj, **kwargs):
+ kwargs.setdefault("protocol", 2)
+ return pickle.dumps(obj, **kwargs)
+
+ def dump_to_fileobj(self, obj, file, **kwargs):
+ kwargs.setdefault("protocol", 2)
+ pickle.dump(obj, file, **kwargs)
+
+ def dump_to_path(self, obj, filepath, **kwargs):
+ super(PickleHandler, self).dump_to_path(obj, filepath, mode="wb", **kwargs)
+
+
+class YamlHandler(BaseFileHandler):
+ def load_from_fileobj(self, file, **kwargs):
+ kwargs.setdefault("Loader", Loader)
+ return yaml.load(file, **kwargs)
+
+ def dump_to_fileobj(self, obj, file, **kwargs):
+ kwargs.setdefault("Dumper", Dumper)
+ yaml.dump(obj, file, **kwargs)
+
+ def dump_to_str(self, obj, **kwargs):
+ kwargs.setdefault("Dumper", Dumper)
+ return yaml.dump(obj, **kwargs)
+
+
+file_handlers = {
+ "json": JsonHandler(),
+ "yaml": YamlHandler(),
+ "yml": YamlHandler(),
+ "pickle": PickleHandler(),
+ "pkl": PickleHandler(),
+}
+
+# ===========================
+# load and dump
+# ===========================
+
+
+def is_str(x):
+    """Whether the input is a string instance.
+
+    Note: kept for backward compatibility; since Python 2 support was dropped
+    this is equivalent to ``isinstance(x, str)``.
+    """
+    return isinstance(x, str)
+
+
+def slload(file, file_format=None, **kwargs):
+ """Load data from json/yaml/pickle files.
+
+ This method provides a unified api for loading data from serialized files.
+
+ Args:
+ file (str or :obj:`Path` or file-like object): Filename or a file-like
+ object.
+ file_format (str, optional): If not specified, the file format will be
+ inferred from the file extension, otherwise use the specified one.
+ Currently supported formats include "json", "yaml/yml" and
+ "pickle/pkl".
+
+ Returns:
+ The content from the file.
+ """
+ if isinstance(file, Path):
+ file = str(file)
+ if file_format is None and is_str(file):
+ file_format = file.split(".")[-1]
+ if file_format not in file_handlers:
+ raise TypeError(f"Unsupported format: {file_format}")
+
+ handler = file_handlers[file_format]
+ if is_str(file):
+ obj = handler.load_from_path(file, **kwargs)
+ elif hasattr(file, "read"):
+ obj = handler.load_from_fileobj(file, **kwargs)
+ else:
+ raise TypeError('"file" must be a filepath str or a file-object')
+ return obj
+
+
+def sldump(obj, file=None, file_format=None, **kwargs):
+ """Dump data to json/yaml/pickle strings or files.
+
+ This method provides a unified api for dumping data as strings or to files,
+ and also supports custom arguments for each file format.
+
+ Args:
+ obj (any): The python object to be dumped.
+        file (str or :obj:`Path` or file-like object, optional): If not
+            specified, the object is dumped to a str; otherwise it is dumped
+            to a file specified by the filename or file-like object.
+ file_format (str, optional): Same as :func:`load`.
+
+    Returns:
+        str or None: The serialized string if ``file`` is None, otherwise None.
+ """
+ if isinstance(file, Path):
+ file = str(file)
+ if file_format is None:
+ if is_str(file):
+ file_format = file.split(".")[-1]
+ elif file is None:
+ raise ValueError("file_format must be specified since file is None")
+ if file_format not in file_handlers:
+ raise TypeError(f"Unsupported format: {file_format}")
+
+ handler = file_handlers[file_format]
+ if file is None:
+ return handler.dump_to_str(obj, **kwargs)
+ elif is_str(file):
+ handler.dump_to_path(obj, file, **kwargs)
+ elif hasattr(file, "write"):
+ handler.dump_to_fileobj(obj, file, **kwargs)
+ else:
+ raise TypeError('"file" must be a filename str or a file-object')
diff --git a/projects/PCSegSAM2/grounding_dino/groundingdino/util/time_counter.py b/projects/PCSegSAM2/grounding_dino/groundingdino/util/time_counter.py
new file mode 100644
index 00000000..0aedb2e4
--- /dev/null
+++ b/projects/PCSegSAM2/grounding_dino/groundingdino/util/time_counter.py
@@ -0,0 +1,62 @@
+import json
+import time
+
+
+class TimeCounter:
+    def __init__(self) -> None:
+        self.clear()  # initialize timedict/basetime so timeit() works immediately
+
+ def clear(self):
+ self.timedict = {}
+ self.basetime = time.perf_counter()
+
+ def timeit(self, name):
+ nowtime = time.perf_counter() - self.basetime
+ self.timedict[name] = nowtime
+ self.basetime = time.perf_counter()
+
+
+class TimeHolder:
+ def __init__(self) -> None:
+ self.timedict = {}
+
+ def update(self, _timedict: dict):
+ for k, v in _timedict.items():
+ if k not in self.timedict:
+ self.timedict[k] = AverageMeter(name=k, val_only=True)
+ self.timedict[k].update(val=v)
+
+ def final_res(self):
+ return {k: v.avg for k, v in self.timedict.items()}
+
+ def __str__(self):
+ return json.dumps(self.final_res(), indent=2)
+
+
+class AverageMeter(object):
+ """Computes and stores the average and current value"""
+
+ def __init__(self, name, fmt=":f", val_only=False):
+ self.name = name
+ self.fmt = fmt
+ self.val_only = val_only
+ self.reset()
+
+ def reset(self):
+ self.val = 0
+ self.avg = 0
+ self.sum = 0
+ self.count = 0
+
+ def update(self, val, n=1):
+ self.val = val
+ self.sum += val * n
+ self.count += n
+ self.avg = self.sum / self.count
+
+ def __str__(self):
+ if self.val_only:
+ fmtstr = "{name} {val" + self.fmt + "}"
+ else:
+ fmtstr = "{name} {val" + self.fmt + "} ({avg" + self.fmt + "})"
+ return fmtstr.format(**self.__dict__)
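+
+
+# NOTE (editor sketch): typical use of the helpers above; `_demo_timing` is a
+# hypothetical function for documentation only and is never called.
+def _demo_timing():
+    tc = TimeCounter()
+    tc.clear()  # reset the dict and the reference time
+    time.sleep(0.01)
+    tc.timeit("forward")  # record seconds elapsed since the last mark
+    holder = TimeHolder()
+    holder.update(tc.timedict)
+    print(holder)  # e.g. {"forward": 0.0101}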
diff --git a/projects/PCSegSAM2/grounding_dino/groundingdino/util/utils.py b/projects/PCSegSAM2/grounding_dino/groundingdino/util/utils.py
new file mode 100644
index 00000000..57b22d1d
--- /dev/null
+++ b/projects/PCSegSAM2/grounding_dino/groundingdino/util/utils.py
@@ -0,0 +1,601 @@
+import argparse
+import json
+import warnings
+from collections import OrderedDict
+from copy import deepcopy
+from typing import Any, Dict, List
+
+import numpy as np
+import torch
+from groundingdino.util.slconfig import SLConfig
+from transformers import AutoTokenizer
+
+
+def slprint(x, name="x"):
+ if isinstance(x, (torch.Tensor, np.ndarray)):
+ print(f"{name}.shape:", x.shape)
+ elif isinstance(x, (tuple, list)):
+ print("type x:", type(x))
+ for i in range(min(10, len(x))):
+ slprint(x[i], f"{name}[{i}]")
+ elif isinstance(x, dict):
+ for k, v in x.items():
+ slprint(v, f"{name}[{k}]")
+ else:
+ print(f"{name}.type:", type(x))
+
+
+def clean_state_dict(state_dict):
+ new_state_dict = OrderedDict()
+ for k, v in state_dict.items():
+ if k[:7] == "module.":
+ k = k[7:] # remove `module.`
+ new_state_dict[k] = v
+ return new_state_dict
+
+
+def renorm(img: torch.FloatTensor, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) -> torch.FloatTensor:
+ # img: tensor(3,H,W) or tensor(B,3,H,W)
+ # return: same as img
+ assert img.dim() == 3 or img.dim() == 4, "img.dim() should be 3 or 4 but %d" % img.dim()
+ if img.dim() == 3:
+        assert img.size(0) == 3, 'img.size(0) should be 3 but "%d". (%s)' % (
+ img.size(0),
+ str(img.size()),
+ )
+ img_perm = img.permute(1, 2, 0)
+ mean = torch.Tensor(mean)
+ std = torch.Tensor(std)
+ img_res = img_perm * std + mean
+ return img_res.permute(2, 0, 1)
+ else: # img.dim() == 4
+        assert img.size(1) == 3, 'img.size(1) should be 3 but "%d". (%s)' % (
+ img.size(1),
+ str(img.size()),
+ )
+ img_perm = img.permute(0, 2, 3, 1)
+ mean = torch.Tensor(mean)
+ std = torch.Tensor(std)
+ img_res = img_perm * std + mean
+ return img_res.permute(0, 3, 1, 2)
+
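+
+# NOTE (editor sketch): `renorm` inverts the standard ImageNet normalization,
+# x_norm = (x - mean) / std, so renorm(x_norm) ≈ x. `_demo_renorm` is a
+# hypothetical helper for documentation only and is never called.
+def _demo_renorm():
+    x = torch.rand(3, 4, 4)  # an image in [0, 1]
+    mean = torch.tensor([0.485, 0.456, 0.406])[:, None, None]
+    std = torch.tensor([0.229, 0.224, 0.225])[:, None, None]
+    assert torch.allclose(renorm((x - mean) / std), x, atol=1e-5)
+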
+
+class CocoClassMapper:
+ def __init__(self) -> None:
+ self.category_map_str = {
+ "1": 1,
+ "2": 2,
+ "3": 3,
+ "4": 4,
+ "5": 5,
+ "6": 6,
+ "7": 7,
+ "8": 8,
+ "9": 9,
+ "10": 10,
+ "11": 11,
+ "13": 12,
+ "14": 13,
+ "15": 14,
+ "16": 15,
+ "17": 16,
+ "18": 17,
+ "19": 18,
+ "20": 19,
+ "21": 20,
+ "22": 21,
+ "23": 22,
+ "24": 23,
+ "25": 24,
+ "27": 25,
+ "28": 26,
+ "31": 27,
+ "32": 28,
+ "33": 29,
+ "34": 30,
+ "35": 31,
+ "36": 32,
+ "37": 33,
+ "38": 34,
+ "39": 35,
+ "40": 36,
+ "41": 37,
+ "42": 38,
+ "43": 39,
+ "44": 40,
+ "46": 41,
+ "47": 42,
+ "48": 43,
+ "49": 44,
+ "50": 45,
+ "51": 46,
+ "52": 47,
+ "53": 48,
+ "54": 49,
+ "55": 50,
+ "56": 51,
+ "57": 52,
+ "58": 53,
+ "59": 54,
+ "60": 55,
+ "61": 56,
+ "62": 57,
+ "63": 58,
+ "64": 59,
+ "65": 60,
+ "67": 61,
+ "70": 62,
+ "72": 63,
+ "73": 64,
+ "74": 65,
+ "75": 66,
+ "76": 67,
+ "77": 68,
+ "78": 69,
+ "79": 70,
+ "80": 71,
+ "81": 72,
+ "82": 73,
+ "84": 74,
+ "85": 75,
+ "86": 76,
+ "87": 77,
+ "88": 78,
+ "89": 79,
+ "90": 80,
+ }
+ self.origin2compact_mapper = {int(k): v - 1 for k, v in self.category_map_str.items()}
+ self.compact2origin_mapper = {int(v - 1): int(k) for k, v in self.category_map_str.items()}
+
+ def origin2compact(self, idx):
+ return self.origin2compact_mapper[int(idx)]
+
+ def compact2origin(self, idx):
+ return self.compact2origin_mapper[int(idx)]
+
+
+def to_device(item, device):
+ if isinstance(item, torch.Tensor):
+ return item.to(device)
+ elif isinstance(item, list):
+ return [to_device(i, device) for i in item]
+ elif isinstance(item, dict):
+ return {k: to_device(v, device) for k, v in item.items()}
+ else:
+ raise NotImplementedError("Call Shilong if you use other containers! type: {}".format(type(item)))
+
+
+def get_gaussian_mean(x, axis, other_axis, softmax=True):
+ """
+
+ Args:
+ x (float): Input images(BxCxHxW)
+ axis (int): The index for weighted mean
+ other_axis (int): The other index
+
+ Returns: weighted index for axis, BxC
+
+ """
+ mat2line = torch.sum(x, axis=other_axis)
+ # mat2line = mat2line / mat2line.mean() * 10
+ if softmax:
+ u = torch.softmax(mat2line, axis=2)
+ else:
+ u = mat2line / (mat2line.sum(2, keepdim=True) + 1e-6)
+ size = x.shape[axis]
+ ind = torch.linspace(0, 1, size).to(x.device)
+ batch = x.shape[0]
+ channel = x.shape[1]
+ index = ind.repeat([batch, channel, 1])
+ mean_position = torch.sum(index * u, dim=2)
+ return mean_position
+
+
+def get_expected_points_from_map(hm, softmax=True):
+    """Soft-argmax: convert heatmaps to expected 2D points.
+
+    Args:
+        hm (Tensor): Input heatmaps of shape (B, C, H, W).
+
+    Returns:
+        Tensor: Expected (x, y) coordinates of shape (B, C, 2), each in [0, 1].
+    """
+    B, C, H, W = hm.shape
+    y_mean = get_gaussian_mean(hm, 2, 3, softmax=softmax)  # B, C
+    x_mean = get_gaussian_mean(hm, 3, 2, softmax=softmax)  # B, C
+ return torch.stack([x_mean, y_mean], dim=2)
+
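+
+# NOTE (editor sketch): behavior of the soft-argmax above on a toy heatmap;
+# `_demo_soft_argmax` is a hypothetical helper for documentation only.
+def _demo_soft_argmax():
+    hm = torch.zeros(1, 1, 8, 8)
+    hm[0, 0, 2, 6] = 10.0  # sharp peak at row 2, column 6
+    pts = get_expected_points_from_map(hm)  # (B, C, 2) in (x, y) order
+    # indices are normalized by linspace(0, 1, 8), so the peak maps to (6/7, 2/7)
+    assert torch.allclose(pts, torch.tensor([[[6 / 7, 2 / 7]]]), atol=0.05)
+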
+
+# Positional encoding (section 5.1)
+# borrow from nerf
+class Embedder:
+ def __init__(self, **kwargs):
+ self.kwargs = kwargs
+ self.create_embedding_fn()
+
+ def create_embedding_fn(self):
+ embed_fns = []
+ d = self.kwargs["input_dims"]
+ out_dim = 0
+ if self.kwargs["include_input"]:
+ embed_fns.append(lambda x: x)
+ out_dim += d
+
+ max_freq = self.kwargs["max_freq_log2"]
+ N_freqs = self.kwargs["num_freqs"]
+
+ if self.kwargs["log_sampling"]:
+ freq_bands = 2.0 ** torch.linspace(0.0, max_freq, steps=N_freqs)
+ else:
+ freq_bands = torch.linspace(2.0**0.0, 2.0**max_freq, steps=N_freqs)
+
+ for freq in freq_bands:
+ for p_fn in self.kwargs["periodic_fns"]:
+ embed_fns.append(lambda x, p_fn=p_fn, freq=freq: p_fn(x * freq))
+ out_dim += d
+
+ self.embed_fns = embed_fns
+ self.out_dim = out_dim
+
+ def embed(self, inputs):
+ return torch.cat([fn(inputs) for fn in self.embed_fns], -1)
+
+
+def get_embedder(multires, i=0):
+ import torch.nn as nn
+
+ if i == -1:
+ return nn.Identity(), 3
+
+ embed_kwargs = {
+ "include_input": True,
+ "input_dims": 3,
+ "max_freq_log2": multires - 1,
+ "num_freqs": multires,
+ "log_sampling": True,
+ "periodic_fns": [torch.sin, torch.cos],
+ }
+
+ embedder_obj = Embedder(**embed_kwargs)
+ embed = lambda x, eo=embedder_obj: eo.embed(x)
+ return embed, embedder_obj.out_dim
+
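+
+# NOTE (editor sketch): the NeRF positional encoding above with 10 frequency
+# bands over 3-D inputs yields 3 + 3 * 2 * 10 = 63 output dims; hypothetical
+# demo helper for documentation only.
+def _demo_get_embedder():
+    embed, out_dim = get_embedder(multires=10)
+    x = torch.rand(5, 3)
+    assert out_dim == 63 and embed(x).shape == (5, 63)
+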
+
+class APOPMeter:
+ def __init__(self) -> None:
+ self.tp = 0
+ self.fp = 0
+ self.tn = 0
+ self.fn = 0
+
+    def update(self, pred, gt):
+        """
+        Input:
+            pred, gt: Tensor()
+        """
+        assert pred.shape == gt.shape
+        self.tp += torch.logical_and(pred == 1, gt == 1).sum().item()
+        self.fp += torch.logical_and(pred == 1, gt == 0).sum().item()
+        self.tn += torch.logical_and(pred == 0, gt == 0).sum().item()
+        self.fn += torch.logical_and(pred == 0, gt == 1).sum().item()
+
+    def update_cm(self, tp, fp, tn, fn):
+        self.tp += tp
+        self.fp += fp
+        self.tn += tn
+        self.fn += fn
+
+
+def inverse_sigmoid(x, eps=1e-5):
+ x = x.clamp(min=0, max=1)
+ x1 = x.clamp(min=eps)
+ x2 = (1 - x).clamp(min=eps)
+ return torch.log(x1 / x2)
+
+
+def get_raw_dict(args):
+ """
+ return the dicf contained in args.
+
+ e.g:
+ >>> with open(path, 'w') as f:
+ json.dump(get_raw_dict(args), f, indent=2)
+ """
+ if isinstance(args, argparse.Namespace):
+ return vars(args)
+ elif isinstance(args, dict):
+ return args
+ elif isinstance(args, SLConfig):
+ return args._cfg_dict
+ else:
+ raise NotImplementedError("Unknown type {}".format(type(args)))
+
+
+def stat_tensors(tensor):
+ assert tensor.dim() == 1
+ tensor_sm = tensor.softmax(0)
+    entropy = -(tensor_sm * torch.log(tensor_sm + 1e-9)).sum()
+
+ return {
+ "max": tensor.max(),
+ "min": tensor.min(),
+ "mean": tensor.mean(),
+ "var": tensor.var(),
+ "std": tensor.var() ** 0.5,
+ "entropy": entropy,
+ }
+
+
+class NiceRepr:
+ """Inherit from this class and define ``__nice__`` to "nicely" print your
+ objects.
+
+    Defines ``__str__`` and ``__repr__`` in terms of the ``__nice__`` function.
+    Classes that inherit from :class:`NiceRepr` should redefine ``__nice__``.
+    If the inheriting class has a ``__len__`` method, then the default
+    ``__nice__`` method will return its length.
+
+    Example:
+        >>> class Foo(NiceRepr):
+        ...    def __nice__(self):
+        ...        return 'info'
+        >>> foo = Foo()
+        >>> assert str(foo) == '<Foo(info)>'
+        >>> assert repr(foo).startswith('<Foo(info) at ')
+
+    Example:
+        >>> class Bar(NiceRepr):
+        ...    pass
+        >>> bar = Bar()
+        >>> import pytest
+        >>> with pytest.warns(None) as record:
+        >>>     assert 'object at' in str(bar)
+        >>>     assert 'object at' in repr(bar)
+
+    Example:
+        >>> class Baz(NiceRepr):
+        ...    def __len__(self):
+        ...        return 5
+        >>> baz = Baz()
+        >>> assert str(baz) == '<Baz(5)>'
+    """
+
+ def __nice__(self):
+ """str: a "nice" summary string describing this module"""
+ if hasattr(self, "__len__"):
+ # It is a common pattern for objects to use __len__ in __nice__
+ # As a convenience we define a default __nice__ for these objects
+ return str(len(self))
+ else:
+ # In all other cases force the subclass to overload __nice__
+ raise NotImplementedError(f"Define the __nice__ method for {self.__class__!r}")
+
+ def __repr__(self):
+ """str: the string of the module"""
+ try:
+ nice = self.__nice__()
+ classname = self.__class__.__name__
+ return f"<{classname}({nice}) at {hex(id(self))}>"
+ except NotImplementedError as ex:
+ warnings.warn(str(ex), category=RuntimeWarning)
+ return object.__repr__(self)
+
+ def __str__(self):
+ """str: the string of the module"""
+ try:
+ classname = self.__class__.__name__
+ nice = self.__nice__()
+ return f"<{classname}({nice})>"
+ except NotImplementedError as ex:
+ warnings.warn(str(ex), category=RuntimeWarning)
+ return object.__repr__(self)
+
+
+def ensure_rng(rng=None):
+ """Coerces input into a random number generator.
+
+ If the input is None, then a global random state is returned.
+
+ If the input is a numeric value, then that is used as a seed to construct a
+ random state. Otherwise the input is returned as-is.
+
+ Adapted from [1]_.
+
+ Args:
+ rng (int | numpy.random.RandomState | None):
+ if None, then defaults to the global rng. Otherwise this can be an
+ integer or a RandomState class
+ Returns:
+ (numpy.random.RandomState) : rng -
+ a numpy random number generator
+
+ References:
+ .. [1] https://gitlab.kitware.com/computer-vision/kwarray/blob/master/kwarray/util_random.py#L270 # noqa: E501
+ """
+
+    if rng is None:
+        rng = np.random.mtrand._rand
+    elif isinstance(rng, int):
+        rng = np.random.RandomState(rng)
+    return rng
+
+
+def random_boxes(num=1, scale=1, rng=None):
+ """Simple version of ``kwimage.Boxes.random``
+
+ Returns:
+ Tensor: shape (n, 4) in x1, y1, x2, y2 format.
+
+ References:
+ https://gitlab.kitware.com/computer-vision/kwimage/blob/master/kwimage/structs/boxes.py#L1390
+
+ Example:
+ >>> num = 3
+ >>> scale = 512
+ >>> rng = 0
+ >>> boxes = random_boxes(num, scale, rng)
+ >>> print(boxes)
+ tensor([[280.9925, 278.9802, 308.6148, 366.1769],
+ [216.9113, 330.6978, 224.0446, 456.5878],
+ [405.3632, 196.3221, 493.3953, 270.7942]])
+ """
+ rng = ensure_rng(rng)
+
+ tlbr = rng.rand(num, 4).astype(np.float32)
+
+ tl_x = np.minimum(tlbr[:, 0], tlbr[:, 2])
+ tl_y = np.minimum(tlbr[:, 1], tlbr[:, 3])
+ br_x = np.maximum(tlbr[:, 0], tlbr[:, 2])
+ br_y = np.maximum(tlbr[:, 1], tlbr[:, 3])
+
+ tlbr[:, 0] = tl_x * scale
+ tlbr[:, 1] = tl_y * scale
+ tlbr[:, 2] = br_x * scale
+ tlbr[:, 3] = br_y * scale
+
+ boxes = torch.from_numpy(tlbr)
+ return boxes
+
+
+class ModelEma(torch.nn.Module):
+ def __init__(self, model, decay=0.9997, device=None):
+ super(ModelEma, self).__init__()
+ # make a copy of the model for accumulating moving average of weights
+ self.module = deepcopy(model)
+ self.module.eval()
+
+ self.decay = decay
+ self.device = device # perform ema on different device from model if set
+ if self.device is not None:
+ self.module.to(device=device)
+
+ def _update(self, model, update_fn):
+ with torch.no_grad():
+ for ema_v, model_v in zip(self.module.state_dict().values(), model.state_dict().values()):
+ if self.device is not None:
+ model_v = model_v.to(device=self.device)
+ ema_v.copy_(update_fn(ema_v, model_v))
+
+ def update(self, model):
+ self._update(model, update_fn=lambda e, m: self.decay * e + (1.0 - self.decay) * m)
+
+ def set(self, model):
+ self._update(model, update_fn=lambda e, m: m)
+
+
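+# NOTE (editor sketch): typical EMA bookkeeping during training; `_demo_model_ema`
+# is a hypothetical helper for documentation only and is never called.
+def _demo_model_ema():
+    net = torch.nn.Linear(4, 2)
+    ema = ModelEma(net, decay=0.99)
+    # after each optimizer step: ema_w <- 0.99 * ema_w + 0.01 * net_w
+    ema.update(net)
+    # hard-sync the EMA weights to the current model if needed
+    ema.set(net)
+
+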
+class BestMetricSingle:
+ def __init__(self, init_res=0.0, better="large") -> None:
+ self.init_res = init_res
+ self.best_res = init_res
+ self.best_ep = -1
+
+ self.better = better
+ assert better in ["large", "small"]
+
+ def isbetter(self, new_res, old_res):
+ if self.better == "large":
+ return new_res > old_res
+ if self.better == "small":
+ return new_res < old_res
+
+ def update(self, new_res, ep):
+ if self.isbetter(new_res, self.best_res):
+ self.best_res = new_res
+ self.best_ep = ep
+ return True
+ return False
+
+ def __str__(self) -> str:
+ return "best_res: {}\t best_ep: {}".format(self.best_res, self.best_ep)
+
+ def __repr__(self) -> str:
+ return self.__str__()
+
+ def summary(self) -> dict:
+ return {
+ "best_res": self.best_res,
+ "best_ep": self.best_ep,
+ }
+
+
+class BestMetricHolder:
+ def __init__(self, init_res=0.0, better="large", use_ema=False) -> None:
+ self.best_all = BestMetricSingle(init_res, better)
+ self.use_ema = use_ema
+ if use_ema:
+ self.best_ema = BestMetricSingle(init_res, better)
+ self.best_regular = BestMetricSingle(init_res, better)
+
+ def update(self, new_res, epoch, is_ema=False):
+ """
+ return if the results is the best.
+ """
+ if not self.use_ema:
+ return self.best_all.update(new_res, epoch)
+ else:
+ if is_ema:
+ self.best_ema.update(new_res, epoch)
+ return self.best_all.update(new_res, epoch)
+ else:
+ self.best_regular.update(new_res, epoch)
+ return self.best_all.update(new_res, epoch)
+
+ def summary(self):
+ if not self.use_ema:
+ return self.best_all.summary()
+
+ res = {}
+ res.update({f"all_{k}": v for k, v in self.best_all.summary().items()})
+ res.update({f"regular_{k}": v for k, v in self.best_regular.summary().items()})
+ res.update({f"ema_{k}": v for k, v in self.best_ema.summary().items()})
+ return res
+
+ def __repr__(self) -> str:
+ return json.dumps(self.summary(), indent=2)
+
+ def __str__(self) -> str:
+ return self.__repr__()
+
+
+def targets_to(targets: List[Dict[str, Any]], device):
+ """Moves the target dicts to the given device."""
+ excluded_keys = [
+ "questionId",
+ "tokens_positive",
+ "strings_positive",
+ "tokens",
+ "dataset_name",
+ "sentence_id",
+ "original_img_id",
+ "nb_eval",
+ "task_id",
+ "original_id",
+ "token_span",
+ "caption",
+ "dataset_type",
+ ]
+ return [{k: v.to(device) if k not in excluded_keys else v for k, v in t.items()} for t in targets]
+
+
+def get_phrases_from_posmap(
+ posmap: torch.BoolTensor, tokenized: Dict, tokenizer: AutoTokenizer, left_idx: int = 0, right_idx: int = 255
+):
+ assert isinstance(posmap, torch.Tensor), "posmap must be torch.Tensor"
+ if posmap.dim() == 1:
+ posmap[0 : left_idx + 1] = False
+ posmap[right_idx:] = False
+ non_zero_idx = posmap.nonzero(as_tuple=True)[0].tolist()
+ token_ids = [tokenized["input_ids"][i] for i in non_zero_idx]
+ return tokenizer.decode(token_ids)
+ else:
+ raise NotImplementedError("posmap must be 1-dim")
diff --git a/projects/PCSegSAM2/grounding_dino/groundingdino/util/visualizer.py b/projects/PCSegSAM2/grounding_dino/groundingdino/util/visualizer.py
new file mode 100644
index 00000000..73dcbca6
--- /dev/null
+++ b/projects/PCSegSAM2/grounding_dino/groundingdino/util/visualizer.py
@@ -0,0 +1,310 @@
+# -*- coding: utf-8 -*-
+"""
+@File : visualizer.py
+@Time : 2022/04/05 11:39:33
+@Author : Shilong Liu
+@Contact : slongliu86@gmail.com
+"""
+
+import datetime
+import os
+
+import cv2
+import matplotlib.pyplot as plt
+import numpy as np
+import torch
+from matplotlib import transforms
+from matplotlib.collections import PatchCollection
+from matplotlib.patches import Polygon
+from pycocotools import mask as maskUtils
+
+
+def renorm(img: torch.FloatTensor, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) -> torch.FloatTensor:
+ # img: tensor(3,H,W) or tensor(B,3,H,W)
+ # return: same as img
+ assert img.dim() == 3 or img.dim() == 4, "img.dim() should be 3 or 4 but %d" % img.dim()
+ if img.dim() == 3:
+        assert img.size(0) == 3, 'img.size(0) should be 3 but "%d". (%s)' % (
+ img.size(0),
+ str(img.size()),
+ )
+ img_perm = img.permute(1, 2, 0)
+ mean = torch.Tensor(mean)
+ std = torch.Tensor(std)
+ img_res = img_perm * std + mean
+ return img_res.permute(2, 0, 1)
+ else: # img.dim() == 4
+        assert img.size(1) == 3, 'img.size(1) should be 3 but "%d". (%s)' % (
+ img.size(1),
+ str(img.size()),
+ )
+ img_perm = img.permute(0, 2, 3, 1)
+ mean = torch.Tensor(mean)
+ std = torch.Tensor(std)
+ img_res = img_perm * std + mean
+ return img_res.permute(0, 3, 1, 2)
+
+
+class ColorMap:
+ def __init__(self, basergb=[255, 255, 0]):
+ self.basergb = np.array(basergb)
+
+ def __call__(self, attnmap):
+ # attnmap: h, w. np.uint8.
+ # return: h, w, 4. np.uint8.
+ assert attnmap.dtype == np.uint8
+ h, w = attnmap.shape
+ res = self.basergb.copy()
+ res = res[None][None].repeat(h, 0).repeat(w, 1) # h, w, 3
+ attn1 = attnmap.copy()[..., None] # h, w, 1
+ res = np.concatenate((res, attn1), axis=-1).astype(np.uint8)
+ return res
+
+
+def rainbow_text(x, y, ls, lc, **kw):
+ """
+ Take a list of strings ``ls`` and colors ``lc`` and place them next to each
+ other, with text ls[i] being shown in color lc[i].
+
+ This example shows how to do both vertical and horizontal text, and will
+ pass all keyword arguments to plt.text, so you can set the font size,
+ family, etc.
+ """
+ t = plt.gca().transData
+ fig = plt.gcf()
+ plt.show()
+
+ # horizontal version
+ for s, c in zip(ls, lc):
+ text = plt.text(x, y, " " + s + " ", color=c, transform=t, **kw)
+ text.draw(fig.canvas.get_renderer())
+ ex = text.get_window_extent()
+ t = transforms.offset_copy(text._transform, x=ex.width, units="dots")
+
+ # #vertical version
+ # for s,c in zip(ls,lc):
+ # text = plt.text(x,y," "+s+" ",color=c, transform=t,
+ # rotation=90,va='bottom',ha='center',**kw)
+ # text.draw(fig.canvas.get_renderer())
+ # ex = text.get_window_extent()
+ # t = transforms.offset_copy(text._transform, y=ex.height, units='dots')
+
+
+class COCOVisualizer:
+ def __init__(self, coco=None, tokenlizer=None) -> None:
+ self.coco = coco
+
+ def visualize(self, img, tgt, caption=None, dpi=180, savedir="vis"):
+ """
+ img: tensor(3, H, W)
+ tgt: make sure they are all on cpu.
+ must have items: 'image_id', 'boxes', 'size'
+ """
+ plt.figure(dpi=dpi)
+ plt.rcParams["font.size"] = "5"
+ ax = plt.gca()
+ img = renorm(img).permute(1, 2, 0)
+ ax.imshow(img)
+
+ self.addtgt(tgt)
+
+        if tgt is None or "image_id" not in tgt:
+            image_id = 0
+        else:
+            image_id = tgt["image_id"]
+
+ if caption is None:
+ savename = "{}/{}-{}.png".format(savedir, int(image_id), str(datetime.datetime.now()).replace(" ", "-"))
+ else:
+ savename = "{}/{}-{}-{}.png".format(
+ savedir, caption, int(image_id), str(datetime.datetime.now()).replace(" ", "-")
+ )
+ print("savename: {}".format(savename))
+ os.makedirs(os.path.dirname(savename), exist_ok=True)
+ plt.savefig(savename)
+ plt.close()
+
+    def addtgt(self, tgt):
+        """Draw the boxes and labels in ``tgt`` on the current axes."""
+        if tgt is None or "boxes" not in tgt:
+            ax = plt.gca()
+
+            if tgt is not None and "caption" in tgt:
+                ax.set_title(tgt["caption"], wrap=True)
+
+            ax.set_axis_off()
+            return
+
+ ax = plt.gca()
+ H, W = tgt["size"]
+ numbox = tgt["boxes"].shape[0]
+
+ color = []
+ polygons = []
+ boxes = []
+ for box in tgt["boxes"].cpu():
+ unnormbbox = box * torch.Tensor([W, H, W, H])
+ unnormbbox[:2] -= unnormbbox[2:] / 2
+ [bbox_x, bbox_y, bbox_w, bbox_h] = unnormbbox.tolist()
+ boxes.append([bbox_x, bbox_y, bbox_w, bbox_h])
+ poly = [
+ [bbox_x, bbox_y],
+ [bbox_x, bbox_y + bbox_h],
+ [bbox_x + bbox_w, bbox_y + bbox_h],
+ [bbox_x + bbox_w, bbox_y],
+ ]
+ np_poly = np.array(poly).reshape((4, 2))
+ polygons.append(Polygon(np_poly))
+ c = (np.random.random((1, 3)) * 0.6 + 0.4).tolist()[0]
+ color.append(c)
+
+ p = PatchCollection(polygons, facecolor=color, linewidths=0, alpha=0.1)
+ ax.add_collection(p)
+ p = PatchCollection(polygons, facecolor="none", edgecolors=color, linewidths=2)
+ ax.add_collection(p)
+
+ if "strings_positive" in tgt and len(tgt["strings_positive"]) > 0:
+ assert len(tgt["strings_positive"]) == numbox, f"{len(tgt['strings_positive'])} = {numbox}, "
+ for idx, strlist in enumerate(tgt["strings_positive"]):
+ cate_id = int(tgt["labels"][idx])
+ _string = str(cate_id) + ":" + " ".join(strlist)
+ bbox_x, bbox_y, bbox_w, bbox_h = boxes[idx]
+ ax.text(
+ bbox_x,
+ bbox_y,
+ _string,
+ color="black",
+ bbox={"facecolor": color[idx], "alpha": 0.6, "pad": 1},
+ )
+
+ if "box_label" in tgt:
+ assert len(tgt["box_label"]) == numbox, f"{len(tgt['box_label'])} = {numbox}, "
+ for idx, bl in enumerate(tgt["box_label"]):
+ _string = str(bl)
+ bbox_x, bbox_y, bbox_w, bbox_h = boxes[idx]
+ ax.text(
+ bbox_x,
+ bbox_y,
+ _string,
+ color="black",
+ bbox={"facecolor": color[idx], "alpha": 0.6, "pad": 1},
+ )
+
+ if "caption" in tgt:
+ ax.set_title(tgt["caption"], wrap=True)
+
+ if "attn" in tgt:
+ if isinstance(tgt["attn"], tuple):
+ tgt["attn"] = [tgt["attn"]]
+ for item in tgt["attn"]:
+ attn_map, basergb = item
+ attn_map = (attn_map - attn_map.min()) / (attn_map.max() - attn_map.min() + 1e-3)
+ attn_map = (attn_map * 255).astype(np.uint8)
+ cm = ColorMap(basergb)
+ heatmap = cm(attn_map)
+ ax.imshow(heatmap)
+ ax.set_axis_off()
+
+ def showAnns(self, anns, draw_bbox=False):
+ """
+ Display the specified annotations.
+ :param anns (array of object): annotations to display
+ :return: None
+ """
+ if len(anns) == 0:
+ return 0
+ if "segmentation" in anns[0] or "keypoints" in anns[0]:
+ datasetType = "instances"
+ elif "caption" in anns[0]:
+ datasetType = "captions"
+ else:
+ raise Exception("datasetType not supported")
+ if datasetType == "instances":
+ ax = plt.gca()
+ ax.set_autoscale_on(False)
+ polygons = []
+ color = []
+ for ann in anns:
+ c = (np.random.random((1, 3)) * 0.6 + 0.4).tolist()[0]
+ if "segmentation" in ann:
+ if type(ann["segmentation"]) == list:
+ # polygon
+ for seg in ann["segmentation"]:
+ poly = np.array(seg).reshape((int(len(seg) / 2), 2))
+ polygons.append(Polygon(poly))
+ color.append(c)
+ else:
+ # mask
+                    t = self.coco.imgs[ann["image_id"]]
+ if type(ann["segmentation"]["counts"]) == list:
+ rle = maskUtils.frPyObjects([ann["segmentation"]], t["height"], t["width"])
+ else:
+ rle = [ann["segmentation"]]
+ m = maskUtils.decode(rle)
+ img = np.ones((m.shape[0], m.shape[1], 3))
+ if ann["iscrowd"] == 1:
+ color_mask = np.array([2.0, 166.0, 101.0]) / 255
+ if ann["iscrowd"] == 0:
+ color_mask = np.random.random((1, 3)).tolist()[0]
+ for i in range(3):
+ img[:, :, i] = color_mask[i]
+ ax.imshow(np.dstack((img, m * 0.5)))
+ if "keypoints" in ann and type(ann["keypoints"]) == list:
+ # turn skeleton into zero-based index
+                    sks = np.array(self.coco.loadCats(ann["category_id"])[0]["skeleton"]) - 1
+ kp = np.array(ann["keypoints"])
+ x = kp[0::3]
+ y = kp[1::3]
+ v = kp[2::3]
+ for sk in sks:
+ if np.all(v[sk] > 0):
+ plt.plot(x[sk], y[sk], linewidth=3, color=c)
+ plt.plot(
+ x[v > 0],
+ y[v > 0],
+ "o",
+ markersize=8,
+ markerfacecolor=c,
+ markeredgecolor="k",
+ markeredgewidth=2,
+ )
+ plt.plot(
+ x[v > 1],
+ y[v > 1],
+ "o",
+ markersize=8,
+ markerfacecolor=c,
+ markeredgecolor=c,
+ markeredgewidth=2,
+ )
+
+ if draw_bbox:
+ [bbox_x, bbox_y, bbox_w, bbox_h] = ann["bbox"]
+ poly = [
+ [bbox_x, bbox_y],
+ [bbox_x, bbox_y + bbox_h],
+ [bbox_x + bbox_w, bbox_y + bbox_h],
+ [bbox_x + bbox_w, bbox_y],
+ ]
+ np_poly = np.array(poly).reshape((4, 2))
+ polygons.append(Polygon(np_poly))
+ color.append(c)
+
+ # p = PatchCollection(polygons, facecolor=color, linewidths=0, alpha=0.4)
+ # ax.add_collection(p)
+ p = PatchCollection(polygons, facecolor="none", edgecolors=color, linewidths=2)
+ ax.add_collection(p)
+ elif datasetType == "captions":
+ for ann in anns:
+ print(ann["caption"])
diff --git a/projects/PCSegSAM2/grounding_dino/groundingdino/util/vl_utils.py b/projects/PCSegSAM2/grounding_dino/groundingdino/util/vl_utils.py
new file mode 100644
index 00000000..89c653ed
--- /dev/null
+++ b/projects/PCSegSAM2/grounding_dino/groundingdino/util/vl_utils.py
@@ -0,0 +1,100 @@
+import os
+import random
+from typing import List
+
+import torch
+
+
+def create_positive_map_from_span(tokenized, token_span, max_text_len=256):
+ """construct a map such that positive_map[i,j] = True iff box i is associated to token j
+ Input:
+ - tokenized:
+ - input_ids: Tensor[1, ntokens]
+ - attention_mask: Tensor[1, ntokens]
+ - token_span: list with length num_boxes.
+ - each item: [start_idx, end_idx]
+ """
+ positive_map = torch.zeros((len(token_span), max_text_len), dtype=torch.float)
+ for j, tok_list in enumerate(token_span):
+ for beg, end in tok_list:
+ beg_pos = tokenized.char_to_token(beg)
+ end_pos = tokenized.char_to_token(end - 1)
+            if beg_pos is None:
+                try:
+                    beg_pos = tokenized.char_to_token(beg + 1)
+                    if beg_pos is None:
+                        beg_pos = tokenized.char_to_token(beg + 2)
+                except Exception:
+                    beg_pos = None
+            if end_pos is None:
+                try:
+                    end_pos = tokenized.char_to_token(end - 2)
+                    if end_pos is None:
+                        end_pos = tokenized.char_to_token(end - 3)
+                except Exception:
+                    end_pos = None
+            if beg_pos is None or end_pos is None:
+                continue
+
+ if os.environ.get("SHILONG_DEBUG_ONLY_ONE_POS", None) == "TRUE":
+ positive_map[j, beg_pos] = 1
+ break
+ else:
+ positive_map[j, beg_pos : end_pos + 1].fill_(1)
+
+ return positive_map / (positive_map.sum(-1)[:, None] + 1e-6)
+
+
+def build_captions_and_token_span(cat_list, force_lowercase):
+ """
+ Return:
+ captions: str
+ cat2tokenspan: dict
+ {
+ 'dog': [[0, 2]],
+ ...
+ }
+ """
+
+ cat2tokenspan = {}
+ captions = ""
+ for catname in cat_list:
+ class_name = catname
+ if force_lowercase:
+ class_name = class_name.lower()
+ if "/" in class_name:
+ class_name_list: List = class_name.strip().split("/")
+ class_name_list.append(class_name)
+ class_name: str = random.choice(class_name_list)
+
+ tokens_positive_i = []
+ subnamelist = [i.strip() for i in class_name.strip().split(" ")]
+ for subname in subnamelist:
+ if len(subname) == 0:
+ continue
+ if len(captions) > 0:
+ captions = captions + " "
+            start_idx = len(captions)
+            end_idx = start_idx + len(subname)
+            tokens_positive_i.append([start_idx, end_idx])
+ captions = captions + subname
+
+ if len(tokens_positive_i) > 0:
+ captions = captions + " ."
+ cat2tokenspan[class_name] = tokens_positive_i
+
+ return captions, cat2tokenspan
+
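+
+# NOTE (editor sketch): concrete output of `build_captions_and_token_span`;
+# `_demo_build_captions` is a hypothetical helper for documentation only.
+def _demo_build_captions():
+    captions, cat2span = build_captions_and_token_span(["cat", "dog"], force_lowercase=True)
+    assert captions == "cat . dog ."
+    # spans are character offsets into `captions`
+    assert cat2span == {"cat": [[0, 3]], "dog": [[6, 9]]}
+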
+
+def build_id2posspan_and_caption(category_dict: dict):
+ """Build id2pos_span and caption from category_dict
+
+ Args:
+ category_dict (dict): category_dict
+ """
+ cat_list = [item["name"].lower() for item in category_dict]
+ id2catname = {item["id"]: item["name"].lower() for item in category_dict}
+ caption, cat2posspan = build_captions_and_token_span(cat_list, force_lowercase=True)
+ id2posspan = {catid: cat2posspan[catname] for catid, catname in id2catname.items()}
+ return id2posspan, caption
diff --git a/projects/PCSegSAM2/grounding_dino/groundingdino/version.py b/projects/PCSegSAM2/grounding_dino/groundingdino/version.py
new file mode 100644
index 00000000..3dc1f76b
--- /dev/null
+++ b/projects/PCSegSAM2/grounding_dino/groundingdino/version.py
@@ -0,0 +1 @@
+__version__ = "0.1.0"
diff --git a/projects/PCSegSAM2/grounding_dino/pyproject.toml b/projects/PCSegSAM2/grounding_dino/pyproject.toml
new file mode 100644
index 00000000..9c076134
--- /dev/null
+++ b/projects/PCSegSAM2/grounding_dino/pyproject.toml
@@ -0,0 +1,8 @@
+[build-system]
+requires = [
+ "setuptools>=42",
+ "wheel",
+ "torch",
+ "torchvision"
+]
+build-backend = "setuptools.build_meta"
diff --git a/projects/PCSegSAM2/grounding_dino/requirements.txt b/projects/PCSegSAM2/grounding_dino/requirements.txt
new file mode 100644
index 00000000..24aa11dc
--- /dev/null
+++ b/projects/PCSegSAM2/grounding_dino/requirements.txt
@@ -0,0 +1,10 @@
+torch
+torchvision
+transformers
+addict
+yapf
+timm
+numpy
+opencv-python
+supervision>=0.22.0
+pycocotools
diff --git a/projects/PCSegSAM2/grounding_dino/setup.py b/projects/PCSegSAM2/grounding_dino/setup.py
new file mode 100644
index 00000000..37b69e60
--- /dev/null
+++ b/projects/PCSegSAM2/grounding_dino/setup.py
@@ -0,0 +1,227 @@
+# coding=utf-8
+# Copyright 2022 The IDEA Authors. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ------------------------------------------------------------------------------------------------
+# Modified from
+# https://github.com/fundamentalvision/Deformable-DETR/blob/main/models/ops/setup.py
+# https://github.com/facebookresearch/detectron2/blob/main/setup.py
+# https://github.com/open-mmlab/mmdetection/blob/master/setup.py
+# https://github.com/Oneflow-Inc/libai/blob/main/setup.py
+# ------------------------------------------------------------------------------------------------
+
+import glob
+import os
+import subprocess
+import sys
+
+
+def install_torch():
+ try:
+ import torch
+ except ImportError:
+ subprocess.check_call([sys.executable, "-m", "pip", "install", "torch"])
+
+
+# Call the function to ensure torch is installed
+install_torch()
+
+import torch
+from setuptools import find_packages, setup
+from torch.utils.cpp_extension import CUDA_HOME, CppExtension, CUDAExtension
+
+# groundingdino version info
+version = "0.1.0"
+package_name = "groundingdino"
+cwd = os.path.dirname(os.path.abspath(__file__))
+
+
+sha = "Unknown"
+try:
+ sha = subprocess.check_output(["git", "rev-parse", "HEAD"], cwd=cwd).decode("ascii").strip()
+except Exception:
+ pass
+
+
+def write_version_file():
+ version_path = os.path.join(cwd, "groundingdino", "version.py")
+ with open(version_path, "w") as f:
+ f.write(f"__version__ = '{version}'\n")
+ # f.write(f"git_version = {repr(sha)}\n")
+
+
+requirements = ["torch", "torchvision"]
+
+torch_ver = [int(x) for x in torch.__version__.split(".")[:2]]
+
+
+def get_extensions():
+ this_dir = os.path.dirname(os.path.abspath(__file__))
+ extensions_dir = os.path.join(this_dir, "groundingdino", "models", "GroundingDINO", "csrc")
+
+ main_source = os.path.join(extensions_dir, "vision.cpp")
+ sources = glob.glob(os.path.join(extensions_dir, "**", "*.cpp"))
+ source_cuda = glob.glob(os.path.join(extensions_dir, "**", "*.cu")) + glob.glob(
+ os.path.join(extensions_dir, "*.cu")
+ )
+
+ sources = [main_source] + sources
+
+ extension = CppExtension
+
+ extra_compile_args = {"cxx": []}
+ define_macros = []
+
+ if CUDA_HOME is not None and (torch.cuda.is_available() or "TORCH_CUDA_ARCH_LIST" in os.environ):
+ print("Compiling with CUDA")
+ extension = CUDAExtension
+ sources += source_cuda
+ define_macros += [("WITH_CUDA", None)]
+ extra_compile_args["nvcc"] = [
+ "-DCUDA_HAS_FP16=1",
+ "-D__CUDA_NO_HALF_OPERATORS__",
+ "-D__CUDA_NO_HALF_CONVERSIONS__",
+ "-D__CUDA_NO_HALF2_OPERATORS__",
+ "-gencode=arch=compute_70,code=sm_70",
+ "-gencode=arch=compute_75,code=sm_75",
+ "-gencode=arch=compute_80,code=sm_80",
+ "-gencode=arch=compute_86,code=sm_86",
+ "-gencode=arch=compute_89,code=sm_89",
+ "-gencode arch=compute_89,code=compute_89",
+ # "-gencode=arch=compute_120,code=sm_120", #TODO(knzo25): uncomment when CUDA 12.8 is available for native support for blackwell
+ ]
+    else:
+        print("Compiling without CUDA")
+        # no CUDA toolchain available: skip building the custom ops entirely
+        return None
+
+ sources = [os.path.join(extensions_dir, s) for s in sources]
+ include_dirs = [extensions_dir]
+
+ ext_modules = [
+ extension(
+ "groundingdino._C",
+ sources,
+ include_dirs=include_dirs,
+ define_macros=define_macros,
+ extra_compile_args=extra_compile_args,
+ )
+ ]
+
+ return ext_modules
+
+
+def parse_requirements(fname="requirements.txt", with_version=True):
+ """Parse the package dependencies listed in a requirements file but strips
+ specific versioning information.
+
+ Args:
+ fname (str): path to requirements file
+        with_version (bool, default=True): if True include version specs
+
+ Returns:
+ List[str]: list of requirements items
+
+ CommandLine:
+ python -c "import setup; print(setup.parse_requirements())"
+ """
+ import re
+ import sys
+ from os.path import exists
+
+ require_fpath = fname
+
+ def parse_line(line):
+ """Parse information from a line in a requirements text file."""
+ if line.startswith("-r "):
+ # Allow specifying requirements in other files
+ target = line.split(" ")[1]
+ for info in parse_require_file(target):
+ yield info
+ else:
+ info = {"line": line}
+ if line.startswith("-e "):
+ info["package"] = line.split("#egg=")[1]
+ elif "@git+" in line:
+ info["package"] = line
+ else:
+ # Remove versioning from the package
+ pat = "(" + "|".join([">=", "==", ">"]) + ")"
+ parts = re.split(pat, line, maxsplit=1)
+ parts = [p.strip() for p in parts]
+
+ info["package"] = parts[0]
+ if len(parts) > 1:
+ op, rest = parts[1:]
+ if ";" in rest:
+ # Handle platform specific dependencies
+ # http://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies
+ version, platform_deps = map(str.strip, rest.split(";"))
+ info["platform_deps"] = platform_deps
+ else:
+ version = rest # NOQA
+ info["version"] = (op, version)
+ yield info
+
+ def parse_require_file(fpath):
+ with open(fpath, "r") as f:
+ for line in f.readlines():
+ line = line.strip()
+ if line and not line.startswith("#"):
+ for info in parse_line(line):
+ yield info
+
+ def gen_packages_items():
+ if exists(require_fpath):
+ for info in parse_require_file(require_fpath):
+ parts = [info["package"]]
+ if with_version and "version" in info:
+ parts.extend(info["version"])
+ if not sys.version.startswith("3.4"):
+ # apparently package_deps are broken in 3.4
+ platform_deps = info.get("platform_deps")
+ if platform_deps is not None:
+ parts.append(";" + platform_deps)
+ item = "".join(parts)
+ yield item
+
+ packages = list(gen_packages_items())
+ return packages
+
+
+if __name__ == "__main__":
+ print(f"Building wheel {package_name}-{version}")
+
+ with open("LICENSE", "r", encoding="utf-8") as f:
+ license = f.read()
+
+ write_version_file()
+
+ setup(
+ name="groundingdino",
+ version="0.1.0",
+ author="International Digital Economy Academy, Shilong Liu",
+ url="https://github.com/IDEA-Research/GroundingDINO",
+ description="open-set object detector",
+ license=license,
+ # install_requires=parse_requirements("requirements.txt"),
+ packages=find_packages(
+ exclude=(
+ "configs",
+ "tests",
+ )
+ ),
+ ext_modules=get_extensions(),
+ cmdclass={"build_ext": torch.utils.cpp_extension.BuildExtension},
+ )
diff --git a/projects/PCSegSAM2/pyproject.toml b/projects/PCSegSAM2/pyproject.toml
new file mode 100644
index 00000000..6116740c
--- /dev/null
+++ b/projects/PCSegSAM2/pyproject.toml
@@ -0,0 +1,6 @@
+[build-system]
+requires = [
+ "setuptools>=62.3.0,<75.9",
+ "torch>=2.3.1",
+ ]
+build-backend = "setuptools.build_meta"
diff --git a/projects/PCSegSAM2/sam2/__init__.py b/projects/PCSegSAM2/sam2/__init__.py
new file mode 100644
index 00000000..0712dd03
--- /dev/null
+++ b/projects/PCSegSAM2/sam2/__init__.py
@@ -0,0 +1,11 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+from hydra import initialize_config_module
+from hydra.core.global_hydra import GlobalHydra
+
+if not GlobalHydra.instance().is_initialized():
+ initialize_config_module("sam2", version_base="1.2")
diff --git a/projects/PCSegSAM2/sam2/automatic_mask_generator.py b/projects/PCSegSAM2/sam2/automatic_mask_generator.py
new file mode 100644
index 00000000..e7c5f139
--- /dev/null
+++ b/projects/PCSegSAM2/sam2/automatic_mask_generator.py
@@ -0,0 +1,433 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+# Adapted from https://github.com/facebookresearch/segment-anything/blob/main/segment_anything/automatic_mask_generator.py
+from typing import Any, Dict, List, Optional, Tuple
+
+import numpy as np
+import torch
+from sam2.modeling.sam2_base import SAM2Base
+from sam2.sam2_image_predictor import SAM2ImagePredictor
+from sam2.utils.amg import (
+ MaskData,
+ area_from_rle,
+ batch_iterator,
+ batched_mask_to_box,
+ box_xyxy_to_xywh,
+ build_all_layer_point_grids,
+ calculate_stability_score,
+ coco_encode_rle,
+ generate_crop_boxes,
+ is_box_near_crop_edge,
+ mask_to_rle_pytorch,
+ remove_small_regions,
+ rle_to_mask,
+ uncrop_boxes_xyxy,
+ uncrop_masks,
+ uncrop_points,
+)
+from torchvision.ops.boxes import batched_nms, box_area # type: ignore
+
+
+class SAM2AutomaticMaskGenerator:
+ def __init__(
+ self,
+ model: SAM2Base,
+ points_per_side: Optional[int] = 32,
+ points_per_batch: int = 64,
+ pred_iou_thresh: float = 0.8,
+ stability_score_thresh: float = 0.95,
+ stability_score_offset: float = 1.0,
+ mask_threshold: float = 0.0,
+ box_nms_thresh: float = 0.7,
+ crop_n_layers: int = 0,
+ crop_nms_thresh: float = 0.7,
+ crop_overlap_ratio: float = 512 / 1500,
+ crop_n_points_downscale_factor: int = 1,
+ point_grids: Optional[List[np.ndarray]] = None,
+ min_mask_region_area: int = 0,
+ output_mode: str = "binary_mask",
+ use_m2m: bool = False,
+ multimask_output: bool = True,
+ **kwargs,
+ ) -> None:
+ """
+ Using a SAM 2 model, generates masks for the entire image.
+ Generates a grid of point prompts over the image, then filters
+ low quality and duplicate masks. The default settings are chosen
+ for SAM 2 with a HieraL backbone.
+
+ Arguments:
+ model (Sam): The SAM 2 model to use for mask prediction.
+ points_per_side (int or None): The number of points to be sampled
+ along one side of the image. The total number of points is
+ points_per_side**2. If None, 'point_grids' must provide explicit
+ point sampling.
+ points_per_batch (int): Sets the number of points run simultaneously
+ by the model. Higher numbers may be faster but use more GPU memory.
+ pred_iou_thresh (float): A filtering threshold in [0,1], using the
+ model's predicted mask quality.
+ stability_score_thresh (float): A filtering threshold in [0,1], using
+ the stability of the mask under changes to the cutoff used to binarize
+ the model's mask predictions.
+            stability_score_offset (float): The amount to shift the cutoff when
+                calculating the stability score.
+ mask_threshold (float): Threshold for binarizing the mask logits
+ box_nms_thresh (float): The box IoU cutoff used by non-maximal
+ suppression to filter duplicate masks.
+ crop_n_layers (int): If >0, mask prediction will be run again on
+ crops of the image. Sets the number of layers to run, where each
+ layer has 2**i_layer number of image crops.
+ crop_nms_thresh (float): The box IoU cutoff used by non-maximal
+ suppression to filter duplicate masks between different crops.
+ crop_overlap_ratio (float): Sets the degree to which crops overlap.
+ In the first crop layer, crops will overlap by this fraction of
+ the image length. Later layers with more crops scale down this overlap.
+ crop_n_points_downscale_factor (int): The number of points-per-side
+ sampled in layer n is scaled down by crop_n_points_downscale_factor**n.
+ point_grids (list(np.ndarray) or None): A list over explicit grids
+ of points used for sampling, normalized to [0,1]. The nth grid in the
+ list is used in the nth crop layer. Exclusive with points_per_side.
+ min_mask_region_area (int): If >0, postprocessing will be applied
+ to remove disconnected regions and holes in masks with area smaller
+ than min_mask_region_area. Requires opencv.
+ output_mode (str): The form masks are returned in. Can be 'binary_mask',
+ 'uncompressed_rle', or 'coco_rle'. 'coco_rle' requires pycocotools.
+ For large resolutions, 'binary_mask' may consume large amounts of
+ memory.
+ use_m2m (bool): Whether to add a one step refinement using previous mask predictions.
+ multimask_output (bool): Whether to output multimask at each point of the grid.
+ """
+
+ assert (points_per_side is None) != (
+ point_grids is None
+ ), "Exactly one of points_per_side or point_grid must be provided."
+ if points_per_side is not None:
+ self.point_grids = build_all_layer_point_grids(
+ points_per_side,
+ crop_n_layers,
+ crop_n_points_downscale_factor,
+ )
+ elif point_grids is not None:
+ self.point_grids = point_grids
+ else:
+ raise ValueError("Can't have both points_per_side and point_grid be None.")
+
+ assert output_mode in [
+ "binary_mask",
+ "uncompressed_rle",
+ "coco_rle",
+ ], f"Unknown output_mode {output_mode}."
+ if output_mode == "coco_rle":
+ try:
+ from pycocotools import mask as mask_utils # type: ignore # noqa: F401
+ except ImportError as e:
+ print("Please install pycocotools")
+ raise e
+
+ self.predictor = SAM2ImagePredictor(
+ model,
+ max_hole_area=min_mask_region_area,
+ max_sprinkle_area=min_mask_region_area,
+ )
+ self.points_per_batch = points_per_batch
+ self.pred_iou_thresh = pred_iou_thresh
+ self.stability_score_thresh = stability_score_thresh
+ self.stability_score_offset = stability_score_offset
+ self.mask_threshold = mask_threshold
+ self.box_nms_thresh = box_nms_thresh
+ self.crop_n_layers = crop_n_layers
+ self.crop_nms_thresh = crop_nms_thresh
+ self.crop_overlap_ratio = crop_overlap_ratio
+ self.crop_n_points_downscale_factor = crop_n_points_downscale_factor
+ self.min_mask_region_area = min_mask_region_area
+ self.output_mode = output_mode
+ self.use_m2m = use_m2m
+ self.multimask_output = multimask_output
+
+ @classmethod
+ def from_pretrained(cls, model_id: str, **kwargs) -> "SAM2AutomaticMaskGenerator":
+ """
+ Load a pretrained model from the Hugging Face hub.
+
+ Arguments:
+ model_id (str): The Hugging Face repository ID.
+ **kwargs: Additional arguments to pass to the model constructor.
+
+ Returns:
+ (SAM2AutomaticMaskGenerator): The loaded model.
+ """
+ from sam2.build_sam import build_sam2_hf
+
+ sam_model = build_sam2_hf(model_id, **kwargs)
+ return cls(sam_model, **kwargs)
+
+ @torch.no_grad()
+ def generate(self, image: np.ndarray) -> List[Dict[str, Any]]:
+ """
+ Generates masks for the given image.
+
+ Arguments:
+ image (np.ndarray): The image to generate masks for, in HWC uint8 format.
+
+ Returns:
+ list(dict(str, any)): A list over records for masks. Each record is
+ a dict containing the following keys:
+ segmentation (dict(str, any) or np.ndarray): The mask. If
+ output_mode='binary_mask', is an array of shape HW. Otherwise,
+ is a dictionary containing the RLE.
+ bbox (list(float)): The box around the mask, in XYWH format.
+ area (int): The area in pixels of the mask.
+ predicted_iou (float): The model's own prediction of the mask's
+ quality. This is filtered by the pred_iou_thresh parameter.
+ point_coords (list(list(float))): The point coordinates input
+ to the model to generate this mask.
+ stability_score (float): A measure of the mask's quality. This
+ is filtered on using the stability_score_thresh parameter.
+ crop_box (list(float)): The crop of the image used to generate
+ the mask, given in XYWH format.
+ """
+
+ # Generate masks
+ mask_data = self._generate_masks(image)
+
+ # Encode masks
+ if self.output_mode == "coco_rle":
+ mask_data["segmentations"] = [coco_encode_rle(rle) for rle in mask_data["rles"]]
+ elif self.output_mode == "binary_mask":
+ mask_data["segmentations"] = [rle_to_mask(rle) for rle in mask_data["rles"]]
+ else:
+ mask_data["segmentations"] = mask_data["rles"]
+
+ # Write mask records
+ curr_anns = []
+ for idx in range(len(mask_data["segmentations"])):
+ ann = {
+ "segmentation": mask_data["segmentations"][idx],
+ "area": area_from_rle(mask_data["rles"][idx]),
+ "bbox": box_xyxy_to_xywh(mask_data["boxes"][idx]).tolist(),
+ "predicted_iou": mask_data["iou_preds"][idx].item(),
+ "point_coords": [mask_data["points"][idx].tolist()],
+ "stability_score": mask_data["stability_score"][idx].item(),
+ "crop_box": box_xyxy_to_xywh(mask_data["crop_boxes"][idx]).tolist(),
+ }
+ curr_anns.append(ann)
+
+ return curr_anns
+
+ def _generate_masks(self, image: np.ndarray) -> MaskData:
+ orig_size = image.shape[:2]
+ crop_boxes, layer_idxs = generate_crop_boxes(orig_size, self.crop_n_layers, self.crop_overlap_ratio)
+
+ # Iterate over image crops
+ data = MaskData()
+ for crop_box, layer_idx in zip(crop_boxes, layer_idxs):
+ crop_data = self._process_crop(image, crop_box, layer_idx, orig_size)
+ data.cat(crop_data)
+
+ # Remove duplicate masks between crops
+ if len(crop_boxes) > 1:
+ # Prefer masks from smaller crops
+ scores = 1 / box_area(data["crop_boxes"])
+ scores = scores.to(data["boxes"].device)
+ keep_by_nms = batched_nms(
+ data["boxes"].float(),
+ scores,
+ torch.zeros_like(data["boxes"][:, 0]), # categories
+ iou_threshold=self.crop_nms_thresh,
+ )
+ data.filter(keep_by_nms)
+ data.to_numpy()
+ return data
+
+ def _process_crop(
+ self,
+ image: np.ndarray,
+ crop_box: List[int],
+ crop_layer_idx: int,
+ orig_size: Tuple[int, ...],
+ ) -> MaskData:
+ # Crop the image and calculate embeddings
+ x0, y0, x1, y1 = crop_box
+ cropped_im = image[y0:y1, x0:x1, :]
+ cropped_im_size = cropped_im.shape[:2]
+ self.predictor.set_image(cropped_im)
+
+ # Get points for this crop
+ points_scale = np.array(cropped_im_size)[None, ::-1]
+ points_for_image = self.point_grids[crop_layer_idx] * points_scale
+
+ # Generate masks for this crop in batches
+ data = MaskData()
+ for (points,) in batch_iterator(self.points_per_batch, points_for_image):
+ batch_data = self._process_batch(points, cropped_im_size, crop_box, orig_size, normalize=True)
+ data.cat(batch_data)
+ del batch_data
+ self.predictor.reset_predictor()
+
+ # Remove duplicates within this crop.
+ keep_by_nms = batched_nms(
+ data["boxes"].float(),
+ data["iou_preds"],
+ torch.zeros_like(data["boxes"][:, 0]), # categories
+ iou_threshold=self.box_nms_thresh,
+ )
+ data.filter(keep_by_nms)
+
+ # Return to the original image frame
+ data["boxes"] = uncrop_boxes_xyxy(data["boxes"], crop_box)
+ data["points"] = uncrop_points(data["points"], crop_box)
+ data["crop_boxes"] = torch.tensor([crop_box for _ in range(len(data["rles"]))])
+
+ return data
+
+ def _process_batch(
+ self,
+ points: np.ndarray,
+ im_size: Tuple[int, ...],
+ crop_box: List[int],
+ orig_size: Tuple[int, ...],
+ normalize=False,
+ ) -> MaskData:
+ orig_h, orig_w = orig_size
+
+ # Run model on this batch
+ points = torch.as_tensor(points, dtype=torch.float32, device=self.predictor.device)
+ in_points = self.predictor._transforms.transform_coords(points, normalize=normalize, orig_hw=im_size)
+ in_labels = torch.ones(in_points.shape[0], dtype=torch.int, device=in_points.device)
+ masks, iou_preds, low_res_masks = self.predictor._predict(
+ in_points[:, None, :],
+ in_labels[:, None],
+ multimask_output=self.multimask_output,
+ return_logits=True,
+ )
+
+ # Serialize predictions and store in MaskData
+ data = MaskData(
+ masks=masks.flatten(0, 1),
+ iou_preds=iou_preds.flatten(0, 1),
+ points=points.repeat_interleave(masks.shape[1], dim=0),
+ low_res_masks=low_res_masks.flatten(0, 1),
+ )
+ del masks
+
+ if not self.use_m2m:
+ # Filter by predicted IoU
+ if self.pred_iou_thresh > 0.0:
+ keep_mask = data["iou_preds"] > self.pred_iou_thresh
+ data.filter(keep_mask)
+
+ # Calculate and filter by stability score
+ data["stability_score"] = calculate_stability_score(
+ data["masks"], self.mask_threshold, self.stability_score_offset
+ )
+ if self.stability_score_thresh > 0.0:
+ keep_mask = data["stability_score"] >= self.stability_score_thresh
+ data.filter(keep_mask)
+ else:
+ # One step refinement using previous mask predictions
+ in_points = self.predictor._transforms.transform_coords(
+ data["points"], normalize=normalize, orig_hw=im_size
+ )
+ labels = torch.ones(in_points.shape[0], dtype=torch.int, device=in_points.device)
+ masks, ious = self.refine_with_m2m(in_points, labels, data["low_res_masks"], self.points_per_batch)
+ data["masks"] = masks.squeeze(1)
+ data["iou_preds"] = ious.squeeze(1)
+
+ if self.pred_iou_thresh > 0.0:
+ keep_mask = data["iou_preds"] > self.pred_iou_thresh
+ data.filter(keep_mask)
+
+ data["stability_score"] = calculate_stability_score(
+ data["masks"], self.mask_threshold, self.stability_score_offset
+ )
+ if self.stability_score_thresh > 0.0:
+ keep_mask = data["stability_score"] >= self.stability_score_thresh
+ data.filter(keep_mask)
+
+ # Threshold masks and calculate boxes
+ data["masks"] = data["masks"] > self.mask_threshold
+ data["boxes"] = batched_mask_to_box(data["masks"])
+
+ # Filter boxes that touch crop boundaries
+ keep_mask = ~is_box_near_crop_edge(data["boxes"], crop_box, [0, 0, orig_w, orig_h])
+ if not torch.all(keep_mask):
+ data.filter(keep_mask)
+
+ # Compress to RLE
+ data["masks"] = uncrop_masks(data["masks"], crop_box, orig_h, orig_w)
+ data["rles"] = mask_to_rle_pytorch(data["masks"])
+ del data["masks"]
+
+ return data
+
+ @staticmethod
+ def postprocess_small_regions(mask_data: MaskData, min_area: int, nms_thresh: float) -> MaskData:
+ """
+ Removes small disconnected regions and holes in masks, then reruns
+ box NMS to remove any new duplicates.
+
+ Edits mask_data in place.
+
+ Requires open-cv as a dependency.
+ """
+ if len(mask_data["rles"]) == 0:
+ return mask_data
+
+ # Filter small disconnected regions and holes
+ new_masks = []
+ scores = []
+ for rle in mask_data["rles"]:
+ mask = rle_to_mask(rle)
+
+ mask, changed = remove_small_regions(mask, min_area, mode="holes")
+ unchanged = not changed
+ mask, changed = remove_small_regions(mask, min_area, mode="islands")
+ unchanged = unchanged and not changed
+
+ new_masks.append(torch.as_tensor(mask).unsqueeze(0))
+ # Give score=0 to changed masks and score=1 to unchanged masks
+ # so NMS will prefer ones that didn't need postprocessing
+ scores.append(float(unchanged))
+
+ # Recalculate boxes and remove any new duplicates
+ masks = torch.cat(new_masks, dim=0)
+ boxes = batched_mask_to_box(masks)
+ keep_by_nms = batched_nms(
+ boxes.float(),
+ torch.as_tensor(scores),
+ torch.zeros_like(boxes[:, 0]), # categories
+ iou_threshold=nms_thresh,
+ )
+
+ # Only recalculate RLEs for masks that have changed
+ for i_mask in keep_by_nms:
+ if scores[i_mask] == 0.0:
+ mask_torch = masks[i_mask].unsqueeze(0)
+ mask_data["rles"][i_mask] = mask_to_rle_pytorch(mask_torch)[0]
+ mask_data["boxes"][i_mask] = boxes[i_mask] # update res directly
+ mask_data.filter(keep_by_nms)
+
+ return mask_data
+
+ def refine_with_m2m(self, points, point_labels, low_res_masks, points_per_batch):
+ new_masks = []
+ new_iou_preds = []
+
+ for cur_points, cur_point_labels, low_res_mask in batch_iterator(
+ points_per_batch, points, point_labels, low_res_masks
+ ):
+ best_masks, best_iou_preds, _ = self.predictor._predict(
+ cur_points[:, None, :],
+ cur_point_labels[:, None],
+ mask_input=low_res_mask[:, None, :],
+ multimask_output=False,
+ return_logits=True,
+ )
+ new_masks.append(best_masks)
+ new_iou_preds.append(best_iou_preds)
+ masks = torch.cat(new_masks, dim=0)
+ return masks, torch.cat(new_iou_preds, dim=0)
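For context, a minimal usage sketch of the mask generator above, assuming a locally downloaded `sam2.1_hiera_small` checkpoint (the paths are illustrative, not part of this PR):

```python
import cv2

from sam2.automatic_mask_generator import SAM2AutomaticMaskGenerator
from sam2.build_sam import build_sam2

# Hypothetical local paths -- adjust to wherever the config/checkpoint live.
model = build_sam2(
    "configs/sam2.1/sam2.1_hiera_s.yaml",
    ckpt_path="checkpoints/sam2.1_hiera_small.pt",
)
generator = SAM2AutomaticMaskGenerator(model, points_per_side=32, output_mode="binary_mask")

image = cv2.cvtColor(cv2.imread("example.jpg"), cv2.COLOR_BGR2RGB)  # HWC uint8
masks = generator.generate(image)
print(len(masks), sorted(masks[0].keys()))  # segmentation, bbox, area, ...
```

Alternatively, `SAM2AutomaticMaskGenerator.from_pretrained("facebook/sam2.1-hiera-small")` fetches the weights from the Hugging Face hub via the mapping defined in `build_sam.py` below.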
diff --git a/projects/PCSegSAM2/sam2/build_sam.py b/projects/PCSegSAM2/sam2/build_sam.py
new file mode 100644
index 00000000..be486be0
--- /dev/null
+++ b/projects/PCSegSAM2/sam2/build_sam.py
@@ -0,0 +1,164 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+import logging
+import os
+
+import sam2
+import torch
+from hydra import compose
+from hydra.utils import instantiate
+from omegaconf import OmegaConf
+
+# Check if the user is running Python from the parent directory of the sam2 repo
+# (i.e. the directory where this repo is cloned into) -- this is not supported since
+# it could shadow the sam2 package and cause issues.
+if os.path.isdir(os.path.join(sam2.__path__[0], "sam2")):
+ # If the user has "sam2/sam2" in their path, they are likely importing the repo itself
+ # as "sam2" rather than importing the "sam2" python package (i.e. "sam2/sam2" directory).
+ # This typically happens because the user is running Python from the parent directory
+ # that contains the sam2 repo they cloned.
+ raise RuntimeError(
+ "You're likely running Python from the parent directory of the sam2 repository "
+ "(i.e. the directory where https://github.com/facebookresearch/sam2 is cloned into). "
+ "This is not supported since the `sam2` Python package could be shadowed by the "
+ "repository name (the repository is also named `sam2` and contains the Python package "
+ "in `sam2/sam2`). Please run Python from another directory (e.g. from the repo dir "
+ "rather than its parent dir, or from your home directory) after installing SAM 2."
+ )
+
+
+HF_MODEL_ID_TO_FILENAMES = {
+ "facebook/sam2-hiera-tiny": (
+ "configs/sam2/sam2_hiera_t.yaml",
+ "sam2_hiera_tiny.pt",
+ ),
+ "facebook/sam2-hiera-small": (
+ "configs/sam2/sam2_hiera_s.yaml",
+ "sam2_hiera_small.pt",
+ ),
+ "facebook/sam2-hiera-base-plus": (
+ "configs/sam2/sam2_hiera_b+.yaml",
+ "sam2_hiera_base_plus.pt",
+ ),
+ "facebook/sam2-hiera-large": (
+ "configs/sam2/sam2_hiera_l.yaml",
+ "sam2_hiera_large.pt",
+ ),
+ "facebook/sam2.1-hiera-tiny": (
+ "configs/sam2.1/sam2.1_hiera_t.yaml",
+ "sam2.1_hiera_tiny.pt",
+ ),
+ "facebook/sam2.1-hiera-small": (
+ "configs/sam2.1/sam2.1_hiera_s.yaml",
+ "sam2.1_hiera_small.pt",
+ ),
+ "facebook/sam2.1-hiera-base-plus": (
+ "configs/sam2.1/sam2.1_hiera_b+.yaml",
+ "sam2.1_hiera_base_plus.pt",
+ ),
+ "facebook/sam2.1-hiera-large": (
+ "configs/sam2.1/sam2.1_hiera_l.yaml",
+ "sam2.1_hiera_large.pt",
+ ),
+}
+
+
+def build_sam2(
+ config_file,
+ ckpt_path=None,
+ device="cuda",
+ mode="eval",
+ hydra_overrides_extra=[],
+ apply_postprocessing=True,
+ **kwargs,
+):
+
+ if apply_postprocessing:
+ hydra_overrides_extra = hydra_overrides_extra.copy()
+ hydra_overrides_extra += [
+ # dynamically fall back to multi-mask if the single mask is not stable
+ "++model.sam_mask_decoder_extra_args.dynamic_multimask_via_stability=true",
+ "++model.sam_mask_decoder_extra_args.dynamic_multimask_stability_delta=0.05",
+ "++model.sam_mask_decoder_extra_args.dynamic_multimask_stability_thresh=0.98",
+ ]
+ # Read config and init model
+ cfg = compose(config_name=config_file, overrides=hydra_overrides_extra)
+ OmegaConf.resolve(cfg)
+ model = instantiate(cfg.model, _recursive_=True)
+ _load_checkpoint(model, ckpt_path)
+ model = model.to(device)
+ if mode == "eval":
+ model.eval()
+ return model
+
+
+def build_sam2_video_predictor(
+ config_file,
+ ckpt_path=None,
+ device="cuda",
+ mode="eval",
+ hydra_overrides_extra=[],
+ apply_postprocessing=True,
+ **kwargs,
+):
+ hydra_overrides = [
+ "++model._target_=sam2.sam2_video_predictor.SAM2VideoPredictor",
+ ]
+ if apply_postprocessing:
+ hydra_overrides_extra = hydra_overrides_extra.copy()
+ hydra_overrides_extra += [
+ # dynamically fall back to multi-mask if the single mask is not stable
+ "++model.sam_mask_decoder_extra_args.dynamic_multimask_via_stability=true",
+ "++model.sam_mask_decoder_extra_args.dynamic_multimask_stability_delta=0.05",
+ "++model.sam_mask_decoder_extra_args.dynamic_multimask_stability_thresh=0.98",
+ # binarize the sigmoid mask logits on interacted frames with clicks in the memory encoder so that the encoded masks are exactly what users see from clicking
+ "++model.binarize_mask_from_pts_for_mem_enc=true",
+ # fill small holes in the low-res masks up to `fill_hole_area` (before resizing them to the original video resolution)
+ "++model.fill_hole_area=8",
+ ]
+ hydra_overrides.extend(hydra_overrides_extra)
+
+ # Read config and init model
+ cfg = compose(config_name=config_file, overrides=hydra_overrides)
+ OmegaConf.resolve(cfg)
+ model = instantiate(cfg.model, _recursive_=True)
+ _load_checkpoint(model, ckpt_path)
+ model = model.to(device)
+ if mode == "eval":
+ model.eval()
+ return model
+
+
+def _hf_download(model_id):
+ from huggingface_hub import hf_hub_download
+
+ config_name, checkpoint_name = HF_MODEL_ID_TO_FILENAMES[model_id]
+ ckpt_path = hf_hub_download(repo_id=model_id, filename=checkpoint_name)
+ return config_name, ckpt_path
+
+
+def build_sam2_hf(model_id, **kwargs):
+ config_name, ckpt_path = _hf_download(model_id)
+ return build_sam2(config_file=config_name, ckpt_path=ckpt_path, **kwargs)
+
+
+def build_sam2_video_predictor_hf(model_id, **kwargs):
+ config_name, ckpt_path = _hf_download(model_id)
+ return build_sam2_video_predictor(config_file=config_name, ckpt_path=ckpt_path, **kwargs)
+
+
+def _load_checkpoint(model, ckpt_path):
+ if ckpt_path is not None:
+ sd = torch.load(ckpt_path, map_location="cpu", weights_only=True)["model"]
+ missing_keys, unexpected_keys = model.load_state_dict(sd)
+ if missing_keys:
+ logging.error(missing_keys)
+ raise RuntimeError("Missing keys while loading the checkpoint state_dict.")
+ if unexpected_keys:
+ logging.error(unexpected_keys)
+ raise RuntimeError("Unexpected keys while loading the checkpoint state_dict.")
+ logging.info("Loaded checkpoint successfully")
diff --git a/projects/PCSegSAM2/sam2/configs/sam2.1/sam2.1_hiera_b+.yaml b/projects/PCSegSAM2/sam2/configs/sam2.1/sam2.1_hiera_b+.yaml
new file mode 100644
index 00000000..cbee3cf9
--- /dev/null
+++ b/projects/PCSegSAM2/sam2/configs/sam2.1/sam2.1_hiera_b+.yaml
@@ -0,0 +1,116 @@
+# @package _global_
+
+# Model
+model:
+ _target_: sam2.modeling.sam2_base.SAM2Base
+ image_encoder:
+ _target_: sam2.modeling.backbones.image_encoder.ImageEncoder
+ scalp: 1
+ trunk:
+ _target_: sam2.modeling.backbones.hieradet.Hiera
+ embed_dim: 112
+ num_heads: 2
+ neck:
+ _target_: sam2.modeling.backbones.image_encoder.FpnNeck
+ position_encoding:
+ _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
+ num_pos_feats: 256
+ normalize: true
+ scale: null
+ temperature: 10000
+ d_model: 256
+ backbone_channel_list: [896, 448, 224, 112]
+ fpn_top_down_levels: [2, 3] # output level 0 and 1 directly use the backbone features
+ fpn_interp_model: nearest
+
+ memory_attention:
+ _target_: sam2.modeling.memory_attention.MemoryAttention
+ d_model: 256
+ pos_enc_at_input: true
+ layer:
+ _target_: sam2.modeling.memory_attention.MemoryAttentionLayer
+ activation: relu
+ dim_feedforward: 2048
+ dropout: 0.1
+ pos_enc_at_attn: false
+ self_attention:
+ _target_: sam2.modeling.sam.transformer.RoPEAttention
+ rope_theta: 10000.0
+ feat_sizes: [32, 32]
+ embedding_dim: 256
+ num_heads: 1
+ downsample_rate: 1
+ dropout: 0.1
+ d_model: 256
+ pos_enc_at_cross_attn_keys: true
+ pos_enc_at_cross_attn_queries: false
+ cross_attention:
+ _target_: sam2.modeling.sam.transformer.RoPEAttention
+ rope_theta: 10000.0
+ feat_sizes: [32, 32]
+ rope_k_repeat: True
+ embedding_dim: 256
+ num_heads: 1
+ downsample_rate: 1
+ dropout: 0.1
+ kv_in_dim: 64
+ num_layers: 4
+
+ memory_encoder:
+ _target_: sam2.modeling.memory_encoder.MemoryEncoder
+ out_dim: 64
+ position_encoding:
+ _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
+ num_pos_feats: 64
+ normalize: true
+ scale: null
+ temperature: 10000
+ mask_downsampler:
+ _target_: sam2.modeling.memory_encoder.MaskDownSampler
+ kernel_size: 3
+ stride: 2
+ padding: 1
+ fuser:
+ _target_: sam2.modeling.memory_encoder.Fuser
+ layer:
+ _target_: sam2.modeling.memory_encoder.CXBlock
+ dim: 256
+ kernel_size: 7
+ padding: 3
+ layer_scale_init_value: 1e-6
+ use_dwconv: True # depth-wise convs
+ num_layers: 2
+
+ num_maskmem: 7
+ image_size: 1024
+ # apply scaled sigmoid on mask logits for memory encoder, and directly feed input mask as output mask
+ sigmoid_scale_for_mem_enc: 20.0
+ sigmoid_bias_for_mem_enc: -10.0
+ use_mask_input_as_output_without_sam: true
+ # Memory
+ directly_add_no_mem_embed: true
+ no_obj_embed_spatial: true
+ # use high-resolution feature map in the SAM mask decoder
+ use_high_res_features_in_sam: true
+ # output 3 masks on the first click on initial conditioning frames
+ multimask_output_in_sam: true
+ # SAM heads
+ iou_prediction_use_sigmoid: True
+ # cross-attend to object pointers from other frames (based on SAM output tokens) in the encoder
+ use_obj_ptrs_in_encoder: true
+ add_tpos_enc_to_obj_ptrs: true
+ proj_tpos_enc_in_obj_ptrs: true
+ use_signed_tpos_enc_to_obj_ptrs: true
+ only_obj_ptrs_in_the_past_for_eval: true
+ # object occlusion prediction
+ pred_obj_scores: true
+ pred_obj_scores_mlp: true
+ fixed_no_obj_ptr: true
+ # multimask tracking settings
+ multimask_output_for_tracking: true
+ use_multimask_token_for_obj_ptr: true
+ multimask_min_pt_num: 0
+ multimask_max_pt_num: 1
+ use_mlp_for_obj_ptr_proj: true
+ # Compilation flag
+ compile_image_encoder: False
diff --git a/projects/PCSegSAM2/sam2/configs/sam2.1/sam2.1_hiera_l.yaml b/projects/PCSegSAM2/sam2/configs/sam2.1/sam2.1_hiera_l.yaml
new file mode 100644
index 00000000..33c9097f
--- /dev/null
+++ b/projects/PCSegSAM2/sam2/configs/sam2.1/sam2.1_hiera_l.yaml
@@ -0,0 +1,120 @@
+# @package _global_
+
+# Model
+model:
+ _target_: sam2.modeling.sam2_base.SAM2Base
+ image_encoder:
+ _target_: sam2.modeling.backbones.image_encoder.ImageEncoder
+ scalp: 1
+ trunk:
+ _target_: sam2.modeling.backbones.hieradet.Hiera
+ embed_dim: 144
+ num_heads: 2
+ stages: [2, 6, 36, 4]
+ global_att_blocks: [23, 33, 43]
+ window_pos_embed_bkg_spatial_size: [7, 7]
+ window_spec: [8, 4, 16, 8]
+ neck:
+ _target_: sam2.modeling.backbones.image_encoder.FpnNeck
+ position_encoding:
+ _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
+ num_pos_feats: 256
+ normalize: true
+ scale: null
+ temperature: 10000
+ d_model: 256
+ backbone_channel_list: [1152, 576, 288, 144]
+ fpn_top_down_levels: [2, 3] # output level 0 and 1 directly use the backbone features
+ fpn_interp_model: nearest
+
+ memory_attention:
+ _target_: sam2.modeling.memory_attention.MemoryAttention
+ d_model: 256
+ pos_enc_at_input: true
+ layer:
+ _target_: sam2.modeling.memory_attention.MemoryAttentionLayer
+ activation: relu
+ dim_feedforward: 2048
+ dropout: 0.1
+ pos_enc_at_attn: false
+ self_attention:
+ _target_: sam2.modeling.sam.transformer.RoPEAttention
+ rope_theta: 10000.0
+ feat_sizes: [32, 32]
+ embedding_dim: 256
+ num_heads: 1
+ downsample_rate: 1
+ dropout: 0.1
+ d_model: 256
+ pos_enc_at_cross_attn_keys: true
+ pos_enc_at_cross_attn_queries: false
+ cross_attention:
+ _target_: sam2.modeling.sam.transformer.RoPEAttention
+ rope_theta: 10000.0
+ feat_sizes: [32, 32]
+ rope_k_repeat: True
+ embedding_dim: 256
+ num_heads: 1
+ downsample_rate: 1
+ dropout: 0.1
+ kv_in_dim: 64
+ num_layers: 4
+
+ memory_encoder:
+ _target_: sam2.modeling.memory_encoder.MemoryEncoder
+ out_dim: 64
+ position_encoding:
+ _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
+ num_pos_feats: 64
+ normalize: true
+ scale: null
+ temperature: 10000
+ mask_downsampler:
+ _target_: sam2.modeling.memory_encoder.MaskDownSampler
+ kernel_size: 3
+ stride: 2
+ padding: 1
+ fuser:
+ _target_: sam2.modeling.memory_encoder.Fuser
+ layer:
+ _target_: sam2.modeling.memory_encoder.CXBlock
+ dim: 256
+ kernel_size: 7
+ padding: 3
+ layer_scale_init_value: 1e-6
+ use_dwconv: True # depth-wise convs
+ num_layers: 2
+
+ num_maskmem: 7
+ image_size: 1024
+ # apply scaled sigmoid on mask logits for memory encoder, and directly feed input mask as output mask
+ sigmoid_scale_for_mem_enc: 20.0
+ sigmoid_bias_for_mem_enc: -10.0
+ use_mask_input_as_output_without_sam: true
+ # Memory
+ directly_add_no_mem_embed: true
+ no_obj_embed_spatial: true
+ # use high-resolution feature map in the SAM mask decoder
+ use_high_res_features_in_sam: true
+ # output 3 masks on the first click on initial conditioning frames
+ multimask_output_in_sam: true
+ # SAM heads
+ iou_prediction_use_sigmoid: True
+ # cross-attend to object pointers from other frames (based on SAM output tokens) in the encoder
+ use_obj_ptrs_in_encoder: true
+ add_tpos_enc_to_obj_ptrs: true
+ proj_tpos_enc_in_obj_ptrs: true
+ use_signed_tpos_enc_to_obj_ptrs: true
+ only_obj_ptrs_in_the_past_for_eval: true
+ # object occlusion prediction
+ pred_obj_scores: true
+ pred_obj_scores_mlp: true
+ fixed_no_obj_ptr: true
+ # multimask tracking settings
+ multimask_output_for_tracking: true
+ use_multimask_token_for_obj_ptr: true
+ multimask_min_pt_num: 0
+ multimask_max_pt_num: 1
+ use_mlp_for_obj_ptr_proj: true
+ # Compilation flag
+ compile_image_encoder: False
diff --git a/projects/PCSegSAM2/sam2/configs/sam2.1/sam2.1_hiera_s.yaml b/projects/PCSegSAM2/sam2/configs/sam2.1/sam2.1_hiera_s.yaml
new file mode 100644
index 00000000..8e803dfe
--- /dev/null
+++ b/projects/PCSegSAM2/sam2/configs/sam2.1/sam2.1_hiera_s.yaml
@@ -0,0 +1,119 @@
+# @package _global_
+
+# Model
+model:
+ _target_: sam2.modeling.sam2_base.SAM2Base
+ image_encoder:
+ _target_: sam2.modeling.backbones.image_encoder.ImageEncoder
+ scalp: 1
+ trunk:
+ _target_: sam2.modeling.backbones.hieradet.Hiera
+ embed_dim: 96
+ num_heads: 1
+ stages: [1, 2, 11, 2]
+ global_att_blocks: [7, 10, 13]
+ window_pos_embed_bkg_spatial_size: [7, 7]
+ neck:
+ _target_: sam2.modeling.backbones.image_encoder.FpnNeck
+ position_encoding:
+ _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
+ num_pos_feats: 256
+ normalize: true
+ scale: null
+ temperature: 10000
+ d_model: 256
+ backbone_channel_list: [768, 384, 192, 96]
+ fpn_top_down_levels: [2, 3] # output level 0 and 1 directly use the backbone features
+ fpn_interp_model: nearest
+
+ memory_attention:
+ _target_: sam2.modeling.memory_attention.MemoryAttention
+ d_model: 256
+ pos_enc_at_input: true
+ layer:
+ _target_: sam2.modeling.memory_attention.MemoryAttentionLayer
+ activation: relu
+ dim_feedforward: 2048
+ dropout: 0.1
+ pos_enc_at_attn: false
+ self_attention:
+ _target_: sam2.modeling.sam.transformer.RoPEAttention
+ rope_theta: 10000.0
+ feat_sizes: [32, 32]
+ embedding_dim: 256
+ num_heads: 1
+ downsample_rate: 1
+ dropout: 0.1
+ d_model: 256
+ pos_enc_at_cross_attn_keys: true
+ pos_enc_at_cross_attn_queries: false
+ cross_attention:
+ _target_: sam2.modeling.sam.transformer.RoPEAttention
+ rope_theta: 10000.0
+ feat_sizes: [32, 32]
+ rope_k_repeat: True
+ embedding_dim: 256
+ num_heads: 1
+ downsample_rate: 1
+ dropout: 0.1
+ kv_in_dim: 64
+ num_layers: 4
+
+ memory_encoder:
+ _target_: sam2.modeling.memory_encoder.MemoryEncoder
+ out_dim: 64
+ position_encoding:
+ _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
+ num_pos_feats: 64
+ normalize: true
+ scale: null
+ temperature: 10000
+ mask_downsampler:
+ _target_: sam2.modeling.memory_encoder.MaskDownSampler
+ kernel_size: 3
+ stride: 2
+ padding: 1
+ fuser:
+ _target_: sam2.modeling.memory_encoder.Fuser
+ layer:
+ _target_: sam2.modeling.memory_encoder.CXBlock
+ dim: 256
+ kernel_size: 7
+ padding: 3
+ layer_scale_init_value: 1e-6
+ use_dwconv: True # depth-wise convs
+ num_layers: 2
+
+ num_maskmem: 7
+ image_size: 1024
+ # apply scaled sigmoid on mask logits for memory encoder, and directly feed input mask as output mask
+ sigmoid_scale_for_mem_enc: 20.0
+ sigmoid_bias_for_mem_enc: -10.0
+ use_mask_input_as_output_without_sam: true
+ # Memory
+ directly_add_no_mem_embed: true
+ no_obj_embed_spatial: true
+ # use high-resolution feature map in the SAM mask decoder
+ use_high_res_features_in_sam: true
+ # output 3 masks on the first click on initial conditioning frames
+ multimask_output_in_sam: true
+ # SAM heads
+ iou_prediction_use_sigmoid: True
+ # cross-attend to object pointers from other frames (based on SAM output tokens) in the encoder
+ use_obj_ptrs_in_encoder: true
+ add_tpos_enc_to_obj_ptrs: true
+ proj_tpos_enc_in_obj_ptrs: true
+ use_signed_tpos_enc_to_obj_ptrs: true
+ only_obj_ptrs_in_the_past_for_eval: true
+ # object occlusion prediction
+ pred_obj_scores: true
+ pred_obj_scores_mlp: true
+ fixed_no_obj_ptr: true
+ # multimask tracking settings
+ multimask_output_for_tracking: true
+ use_multimask_token_for_obj_ptr: true
+ multimask_min_pt_num: 0
+ multimask_max_pt_num: 1
+ use_mlp_for_obj_ptr_proj: true
+ # Compilation flag
+ compile_image_encoder: False
diff --git a/projects/PCSegSAM2/sam2/configs/sam2.1/sam2.1_hiera_t.yaml b/projects/PCSegSAM2/sam2/configs/sam2.1/sam2.1_hiera_t.yaml
new file mode 100644
index 00000000..983c2ea0
--- /dev/null
+++ b/projects/PCSegSAM2/sam2/configs/sam2.1/sam2.1_hiera_t.yaml
@@ -0,0 +1,121 @@
+# @package _global_
+
+# Model
+model:
+ _target_: sam2.modeling.sam2_base.SAM2Base
+ image_encoder:
+ _target_: sam2.modeling.backbones.image_encoder.ImageEncoder
+ scalp: 1
+ trunk:
+ _target_: sam2.modeling.backbones.hieradet.Hiera
+ embed_dim: 96
+ num_heads: 1
+ stages: [1, 2, 7, 2]
+ global_att_blocks: [5, 7, 9]
+ window_pos_embed_bkg_spatial_size: [7, 7]
+ neck:
+ _target_: sam2.modeling.backbones.image_encoder.FpnNeck
+ position_encoding:
+ _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
+ num_pos_feats: 256
+ normalize: true
+ scale: null
+ temperature: 10000
+ d_model: 256
+ backbone_channel_list: [768, 384, 192, 96]
+ fpn_top_down_levels: [2, 3] # output level 0 and 1 directly use the backbone features
+ fpn_interp_model: nearest
+
+ memory_attention:
+ _target_: sam2.modeling.memory_attention.MemoryAttention
+ d_model: 256
+ pos_enc_at_input: true
+ layer:
+ _target_: sam2.modeling.memory_attention.MemoryAttentionLayer
+ activation: relu
+ dim_feedforward: 2048
+ dropout: 0.1
+ pos_enc_at_attn: false
+ self_attention:
+ _target_: sam2.modeling.sam.transformer.RoPEAttention
+ rope_theta: 10000.0
+ feat_sizes: [32, 32]
+ embedding_dim: 256
+ num_heads: 1
+ downsample_rate: 1
+ dropout: 0.1
+ d_model: 256
+ pos_enc_at_cross_attn_keys: true
+ pos_enc_at_cross_attn_queries: false
+ cross_attention:
+ _target_: sam2.modeling.sam.transformer.RoPEAttention
+ rope_theta: 10000.0
+ feat_sizes: [32, 32]
+ rope_k_repeat: True
+ embedding_dim: 256
+ num_heads: 1
+ downsample_rate: 1
+ dropout: 0.1
+ kv_in_dim: 64
+ num_layers: 4
+
+ memory_encoder:
+ _target_: sam2.modeling.memory_encoder.MemoryEncoder
+ out_dim: 64
+ position_encoding:
+ _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
+ num_pos_feats: 64
+ normalize: true
+ scale: null
+ temperature: 10000
+ mask_downsampler:
+ _target_: sam2.modeling.memory_encoder.MaskDownSampler
+ kernel_size: 3
+ stride: 2
+ padding: 1
+ fuser:
+ _target_: sam2.modeling.memory_encoder.Fuser
+ layer:
+ _target_: sam2.modeling.memory_encoder.CXBlock
+ dim: 256
+ kernel_size: 7
+ padding: 3
+ layer_scale_init_value: 1e-6
+ use_dwconv: True # depth-wise convs
+ num_layers: 2
+
+ num_maskmem: 7
+ image_size: 1024
+ # apply scaled sigmoid on mask logits for memory encoder, and directly feed input mask as output mask
+ # SAM decoder
+ sigmoid_scale_for_mem_enc: 20.0
+ sigmoid_bias_for_mem_enc: -10.0
+ use_mask_input_as_output_without_sam: true
+ # Memory
+ directly_add_no_mem_embed: true
+ no_obj_embed_spatial: true
+ # use high-resolution feature map in the SAM mask decoder
+ use_high_res_features_in_sam: true
+ # output 3 masks on the first click on initial conditioning frames
+ multimask_output_in_sam: true
+ # SAM heads
+ iou_prediction_use_sigmoid: True
+ # cross-attend to object pointers from other frames (based on SAM output tokens) in the encoder
+ use_obj_ptrs_in_encoder: true
+ add_tpos_enc_to_obj_ptrs: true
+ proj_tpos_enc_in_obj_ptrs: true
+ use_signed_tpos_enc_to_obj_ptrs: true
+ only_obj_ptrs_in_the_past_for_eval: true
+ # object occlusion prediction
+ pred_obj_scores: true
+ pred_obj_scores_mlp: true
+ fixed_no_obj_ptr: true
+ # multimask tracking settings
+ multimask_output_for_tracking: true
+ use_multimask_token_for_obj_ptr: true
+ multimask_min_pt_num: 0
+ multimask_max_pt_num: 1
+ use_mlp_for_obj_ptr_proj: true
+ # Compilation flag
+ # HieraT does not currently support compilation, should always be set to False
+ compile_image_encoder: False
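The four sam2.1 configs above differ only in the Hiera trunk (`embed_dim`, `num_heads`, `stages`, global-attention blocks) and the matching `backbone_channel_list`; everything from the memory attention onward is shared. A sketch of inspecting one of them standalone with Hydra's compose API, assuming the `sam2` package has registered its packaged configs with Hydra (the upstream package does this on import):

```python
from hydra import compose, initialize_config_module
from omegaconf import OmegaConf

# If `import sam2` already initialized Hydra, skip the context manager.
with initialize_config_module(config_module="sam2", version_base="1.2"):
    cfg = compose(config_name="configs/sam2.1/sam2.1_hiera_t.yaml")

OmegaConf.resolve(cfg)
trunk = cfg.model.image_encoder.trunk
print(trunk.embed_dim, list(trunk.stages))  # 96 [1, 2, 7, 2]
```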
diff --git a/projects/PCSegSAM2/sam2/configs/sam2.1_training/sam2.1_hiera_b+_MOSE_finetune.yaml b/projects/PCSegSAM2/sam2/configs/sam2.1_training/sam2.1_hiera_b+_MOSE_finetune.yaml
new file mode 100644
index 00000000..b17e59d7
--- /dev/null
+++ b/projects/PCSegSAM2/sam2/configs/sam2.1_training/sam2.1_hiera_b+_MOSE_finetune.yaml
@@ -0,0 +1,338 @@
+# @package _global_
+
+scratch:
+ resolution: 1024
+ train_batch_size: 1
+ num_train_workers: 10
+ num_frames: 8
+ max_num_objects: 3
+ base_lr: 5.0e-6
+ vision_lr: 3.0e-06
+ phases_per_epoch: 1
+ num_epochs: 40
+
+dataset:
+ # PATHS to Dataset
+ img_folder: null # PATH to MOSE JPEGImages folder
+ gt_folder: null # PATH to MOSE Annotations folder
+ file_list_txt: training/assets/MOSE_sample_train_list.txt # Optional PATH to filelist containing a subset of videos to be used for training
+ multiplier: 2
+
+# Video transforms
+vos:
+ train_transforms:
+ - _target_: training.dataset.transforms.ComposeAPI
+ transforms:
+ - _target_: training.dataset.transforms.RandomHorizontalFlip
+ consistent_transform: True
+ - _target_: training.dataset.transforms.RandomAffine
+ degrees: 25
+ shear: 20
+ image_interpolation: bilinear
+ consistent_transform: True
+ - _target_: training.dataset.transforms.RandomResizeAPI
+ sizes: ${scratch.resolution}
+ square: true
+ consistent_transform: True
+ - _target_: training.dataset.transforms.ColorJitter
+ consistent_transform: True
+ brightness: 0.1
+ contrast: 0.03
+ saturation: 0.03
+ hue: null
+ - _target_: training.dataset.transforms.RandomGrayscale
+ p: 0.05
+ consistent_transform: True
+ - _target_: training.dataset.transforms.ColorJitter
+ consistent_transform: False
+ brightness: 0.1
+ contrast: 0.05
+ saturation: 0.05
+ hue: null
+ - _target_: training.dataset.transforms.ToTensorAPI
+ - _target_: training.dataset.transforms.NormalizeAPI
+ mean: [0.485, 0.456, 0.406]
+ std: [0.229, 0.224, 0.225]
+
+trainer:
+ _target_: training.trainer.Trainer
+ mode: train_only
+ max_epochs: ${times:${scratch.num_epochs},${scratch.phases_per_epoch}}
+ accelerator: cuda
+ seed_value: 123
+
+ model:
+ _target_: training.model.sam2.SAM2Train
+ image_encoder:
+ _target_: sam2.modeling.backbones.image_encoder.ImageEncoder
+ scalp: 1
+ trunk:
+ _target_: sam2.modeling.backbones.hieradet.Hiera
+ embed_dim: 112
+ num_heads: 2
+ drop_path_rate: 0.1
+ neck:
+ _target_: sam2.modeling.backbones.image_encoder.FpnNeck
+ position_encoding:
+ _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
+ num_pos_feats: 256
+ normalize: true
+ scale: null
+ temperature: 10000
+ d_model: 256
+ backbone_channel_list: [896, 448, 224, 112]
+ fpn_top_down_levels: [2, 3] # output level 0 and 1 directly use the backbone features
+ fpn_interp_model: nearest
+
+ memory_attention:
+ _target_: sam2.modeling.memory_attention.MemoryAttention
+ d_model: 256
+ pos_enc_at_input: true
+ layer:
+ _target_: sam2.modeling.memory_attention.MemoryAttentionLayer
+ activation: relu
+ dim_feedforward: 2048
+ dropout: 0.1
+ pos_enc_at_attn: false
+ self_attention:
+ _target_: sam2.modeling.sam.transformer.RoPEAttention
+ rope_theta: 10000.0
+ feat_sizes: [32, 32]
+ embedding_dim: 256
+ num_heads: 1
+ downsample_rate: 1
+ dropout: 0.1
+ d_model: 256
+ pos_enc_at_cross_attn_keys: true
+ pos_enc_at_cross_attn_queries: false
+ cross_attention:
+ _target_: sam2.modeling.sam.transformer.RoPEAttention
+ rope_theta: 10000.0
+ feat_sizes: [32, 32]
+ rope_k_repeat: True
+ embedding_dim: 256
+ num_heads: 1
+ downsample_rate: 1
+ dropout: 0.1
+ kv_in_dim: 64
+ num_layers: 4
+
+ memory_encoder:
+ _target_: sam2.modeling.memory_encoder.MemoryEncoder
+ out_dim: 64
+ position_encoding:
+ _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
+ num_pos_feats: 64
+ normalize: true
+ scale: null
+ temperature: 10000
+ mask_downsampler:
+ _target_: sam2.modeling.memory_encoder.MaskDownSampler
+ kernel_size: 3
+ stride: 2
+ padding: 1
+ fuser:
+ _target_: sam2.modeling.memory_encoder.Fuser
+ layer:
+ _target_: sam2.modeling.memory_encoder.CXBlock
+ dim: 256
+ kernel_size: 7
+ padding: 3
+ layer_scale_init_value: 1e-6
+ use_dwconv: True # depth-wise convs
+ num_layers: 2
+
+ num_maskmem: 7
+ image_size: ${scratch.resolution}
+ # apply scaled sigmoid on mask logits for memory encoder, and directly feed input mask as output mask
+ sigmoid_scale_for_mem_enc: 20.0
+ sigmoid_bias_for_mem_enc: -10.0
+ use_mask_input_as_output_without_sam: true
+ # Memory
+ directly_add_no_mem_embed: true
+ no_obj_embed_spatial: true
+ # use high-resolution feature map in the SAM mask decoder
+ use_high_res_features_in_sam: true
+ # output 3 masks on the first click on initial conditioning frames
+ multimask_output_in_sam: true
+ # SAM heads
+ iou_prediction_use_sigmoid: True
+ # cross-attend to object pointers from other frames (based on SAM output tokens) in the encoder
+ use_obj_ptrs_in_encoder: true
+ add_tpos_enc_to_obj_ptrs: true
+ proj_tpos_enc_in_obj_ptrs: true
+ use_signed_tpos_enc_to_obj_ptrs: true
+ only_obj_ptrs_in_the_past_for_eval: true
+ # object occlusion prediction
+ pred_obj_scores: true
+ pred_obj_scores_mlp: true
+ fixed_no_obj_ptr: true
+ # multimask tracking settings
+ multimask_output_for_tracking: true
+ use_multimask_token_for_obj_ptr: true
+ multimask_min_pt_num: 0
+ multimask_max_pt_num: 1
+ use_mlp_for_obj_ptr_proj: true
+ # Compilation flag
+ # compile_image_encoder: False
+
+ ####### Training specific params #######
+ # box/point input and corrections
+ prob_to_use_pt_input_for_train: 0.5
+ prob_to_use_pt_input_for_eval: 0.0
+ prob_to_use_box_input_for_train: 0.5 # 0.5*0.5 = 0.25 prob to use box instead of points
+ prob_to_use_box_input_for_eval: 0.0
+ prob_to_sample_from_gt_for_train: 0.1 # with a small prob, sampling correction points from GT mask instead of prediction errors
+ num_frames_to_correct_for_train: 2 # iteratively sample on random 1~2 frames (always include the first frame)
+ num_frames_to_correct_for_eval: 1 # only iteratively sample on first frame
+ rand_frames_to_correct_for_train: True # randomly pick the number of frames to correct, between #init-cond-frames and 2
+ add_all_frames_to_correct_as_cond: True # when a frame receives a correction click, it becomes a conditioning frame (even if it's not initially a conditioning frame)
+ # maximum 2 initial conditioning frames
+ num_init_cond_frames_for_train: 2
+ rand_init_cond_frames_for_train: True # random 1~2
+ num_correction_pt_per_frame: 7
+ use_act_ckpt_iterative_pt_sampling: false
+
+
+
+ num_init_cond_frames_for_eval: 1 # only mask on the first frame
+ forward_backbone_per_frame_for_eval: True
+
+
+ data:
+ train:
+ _target_: training.dataset.sam2_datasets.TorchTrainMixedDataset
+ phases_per_epoch: ${scratch.phases_per_epoch}
+ batch_sizes:
+ - ${scratch.train_batch_size}
+
+ datasets:
+ - _target_: training.dataset.utils.RepeatFactorWrapper
+ dataset:
+ _target_: training.dataset.utils.ConcatDataset
+ datasets:
+ - _target_: training.dataset.vos_dataset.VOSDataset
+ transforms: ${vos.train_transforms}
+ training: true
+ video_dataset:
+ _target_: training.dataset.vos_raw_dataset.PNGRawDataset
+ img_folder: ${dataset.img_folder}
+ gt_folder: ${dataset.gt_folder}
+ file_list_txt: ${dataset.file_list_txt}
+ sampler:
+ _target_: training.dataset.vos_sampler.RandomUniformSampler
+ num_frames: ${scratch.num_frames}
+ max_num_objects: ${scratch.max_num_objects}
+ multiplier: ${dataset.multiplier}
+ shuffle: True
+ num_workers: ${scratch.num_train_workers}
+ pin_memory: True
+ drop_last: True
+ collate_fn:
+ _target_: training.utils.data_utils.collate_fn
+ _partial_: true
+ dict_key: all
+
+ optim:
+ amp:
+ enabled: True
+ amp_dtype: bfloat16
+
+ optimizer:
+ _target_: torch.optim.AdamW
+
+ gradient_clip:
+ _target_: training.optimizer.GradientClipper
+ max_norm: 0.1
+ norm_type: 2
+
+ param_group_modifiers:
+ - _target_: training.optimizer.layer_decay_param_modifier
+ _partial_: True
+ layer_decay_value: 0.9
+ apply_to: 'image_encoder.trunk'
+ overrides:
+ - pattern: '*pos_embed*'
+ value: 1.0
+
+ options:
+ lr:
+ - scheduler:
+ _target_: fvcore.common.param_scheduler.CosineParamScheduler
+ start_value: ${scratch.base_lr}
+ end_value: ${divide:${scratch.base_lr},10}
+ - scheduler:
+ _target_: fvcore.common.param_scheduler.CosineParamScheduler
+ start_value: ${scratch.vision_lr}
+ end_value: ${divide:${scratch.vision_lr},10}
+ param_names:
+ - 'image_encoder.*'
+ weight_decay:
+ - scheduler:
+ _target_: fvcore.common.param_scheduler.ConstantParamScheduler
+ value: 0.1
+ - scheduler:
+ _target_: fvcore.common.param_scheduler.ConstantParamScheduler
+ value: 0.0
+ param_names:
+ - '*bias*'
+ module_cls_names: ['torch.nn.LayerNorm']
+
+ loss:
+ all:
+ _target_: training.loss_fns.MultiStepMultiMasksAndIous
+ weight_dict:
+ loss_mask: 20
+ loss_dice: 1
+ loss_iou: 1
+ loss_class: 1
+ supervise_all_iou: true
+ iou_use_l1_loss: true
+ pred_obj_scores: true
+ focal_gamma_obj_score: 0.0
+ focal_alpha_obj_score: -1.0
+
+ distributed:
+ backend: nccl
+ find_unused_parameters: True
+
+ logging:
+ tensorboard_writer:
+ _target_: training.utils.logger.make_tensorboard_logger
+ log_dir: ${launcher.experiment_log_dir}/tensorboard
+ flush_secs: 120
+ should_log: True
+ log_dir: ${launcher.experiment_log_dir}/logs
+ log_freq: 10
+
+ # initialize from a SAM 2 checkpoint
+ checkpoint:
+ save_dir: ${launcher.experiment_log_dir}/checkpoints
+ save_freq: 0 # 0 means only the last checkpoint is saved.
+ model_weight_initializer:
+ _partial_: True
+ _target_: training.utils.checkpoint_utils.load_state_dict_into_model
+ strict: True
+ ignore_unexpected_keys: null
+ ignore_missing_keys: null
+
+ state_dict:
+ _target_: training.utils.checkpoint_utils.load_checkpoint_and_apply_kernels
+ checkpoint_path: ./checkpoints/sam2.1_hiera_base_plus.pt # PATH to SAM 2.1 checkpoint
+ ckpt_state_dict_keys: ['model']
+
+launcher:
+ num_nodes: 1
+ gpus_per_node: 8
+ experiment_log_dir: null # Path to log directory, defaults to ./sam2_logs/${config_name}
+
+# SLURM args if running on a cluster
+submitit:
+ partition: null
+ account: null
+ qos: null
+ cpus_per_task: 10
+ use_cluster: false
+ timeout_hour: 24
+ name: null
+ port_range: [10000, 65000]
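The `${times:...}` and `${divide:...}` interpolations in this config are not built into OmegaConf; the training entry point is expected to register them as custom resolvers before composing the config. A minimal sketch of that registration (the resolver names come from the config above; the exact registration site in the training code is an assumption):

```python
from omegaconf import OmegaConf

# Custom arithmetic resolvers used by the config, e.g.
#   max_epochs: ${times:${scratch.num_epochs},${scratch.phases_per_epoch}}
#   end_value: ${divide:${scratch.base_lr},10}
OmegaConf.register_new_resolver("times", lambda a, b: a * b)
OmegaConf.register_new_resolver("divide", lambda a, b: a / b)

cfg = OmegaConf.create({"base_lr": 5.0e-6, "end_lr": "${divide:${base_lr},10}"})
print(cfg.end_lr)  # 5e-07
```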
diff --git a/projects/PCSegSAM2/sam2/configs/sam2/sam2_hiera_b+.yaml b/projects/PCSegSAM2/sam2/configs/sam2/sam2_hiera_b+.yaml
new file mode 100644
index 00000000..58f3eb81
--- /dev/null
+++ b/projects/PCSegSAM2/sam2/configs/sam2/sam2_hiera_b+.yaml
@@ -0,0 +1,113 @@
+# @package _global_
+
+# Model
+model:
+ _target_: sam2.modeling.sam2_base.SAM2Base
+ image_encoder:
+ _target_: sam2.modeling.backbones.image_encoder.ImageEncoder
+ scalp: 1
+ trunk:
+ _target_: sam2.modeling.backbones.hieradet.Hiera
+ embed_dim: 112
+ num_heads: 2
+ neck:
+ _target_: sam2.modeling.backbones.image_encoder.FpnNeck
+ position_encoding:
+ _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
+ num_pos_feats: 256
+ normalize: true
+ scale: null
+ temperature: 10000
+ d_model: 256
+ backbone_channel_list: [896, 448, 224, 112]
+ fpn_top_down_levels: [2, 3] # output level 0 and 1 directly use the backbone features
+ fpn_interp_model: nearest
+
+ memory_attention:
+ _target_: sam2.modeling.memory_attention.MemoryAttention
+ d_model: 256
+ pos_enc_at_input: true
+ layer:
+ _target_: sam2.modeling.memory_attention.MemoryAttentionLayer
+ activation: relu
+ dim_feedforward: 2048
+ dropout: 0.1
+ pos_enc_at_attn: false
+ self_attention:
+ _target_: sam2.modeling.sam.transformer.RoPEAttention
+ rope_theta: 10000.0
+ feat_sizes: [32, 32]
+ embedding_dim: 256
+ num_heads: 1
+ downsample_rate: 1
+ dropout: 0.1
+ d_model: 256
+ pos_enc_at_cross_attn_keys: true
+ pos_enc_at_cross_attn_queries: false
+ cross_attention:
+ _target_: sam2.modeling.sam.transformer.RoPEAttention
+ rope_theta: 10000.0
+ feat_sizes: [32, 32]
+ rope_k_repeat: True
+ embedding_dim: 256
+ num_heads: 1
+ downsample_rate: 1
+ dropout: 0.1
+ kv_in_dim: 64
+ num_layers: 4
+
+ memory_encoder:
+ _target_: sam2.modeling.memory_encoder.MemoryEncoder
+ out_dim: 64
+ position_encoding:
+ _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
+ num_pos_feats: 64
+ normalize: true
+ scale: null
+ temperature: 10000
+ mask_downsampler:
+ _target_: sam2.modeling.memory_encoder.MaskDownSampler
+ kernel_size: 3
+ stride: 2
+ padding: 1
+ fuser:
+ _target_: sam2.modeling.memory_encoder.Fuser
+ layer:
+ _target_: sam2.modeling.memory_encoder.CXBlock
+ dim: 256
+ kernel_size: 7
+ padding: 3
+ layer_scale_init_value: 1e-6
+ use_dwconv: True # depth-wise convs
+ num_layers: 2
+
+ num_maskmem: 7
+ image_size: 1024
+ # apply scaled sigmoid on mask logits for memory encoder, and directly feed input mask as output mask
+ sigmoid_scale_for_mem_enc: 20.0
+ sigmoid_bias_for_mem_enc: -10.0
+ use_mask_input_as_output_without_sam: true
+ # Memory
+ directly_add_no_mem_embed: true
+ # use high-resolution feature map in the SAM mask decoder
+ use_high_res_features_in_sam: true
+ # output 3 masks on the first click on initial conditioning frames
+ multimask_output_in_sam: true
+ # SAM heads
+ iou_prediction_use_sigmoid: True
+ # cross-attend to object pointers from other frames (based on SAM output tokens) in the encoder
+ use_obj_ptrs_in_encoder: true
+ add_tpos_enc_to_obj_ptrs: false
+ only_obj_ptrs_in_the_past_for_eval: true
+ # object occlusion prediction
+ pred_obj_scores: true
+ pred_obj_scores_mlp: true
+ fixed_no_obj_ptr: true
+ # multimask tracking settings
+ multimask_output_for_tracking: true
+ use_multimask_token_for_obj_ptr: true
+ multimask_min_pt_num: 0
+ multimask_max_pt_num: 1
+ use_mlp_for_obj_ptr_proj: true
+ # Compilation flag
+ compile_image_encoder: False
diff --git a/projects/PCSegSAM2/sam2/configs/sam2/sam2_hiera_l.yaml b/projects/PCSegSAM2/sam2/configs/sam2/sam2_hiera_l.yaml
new file mode 100644
index 00000000..918667f5
--- /dev/null
+++ b/projects/PCSegSAM2/sam2/configs/sam2/sam2_hiera_l.yaml
@@ -0,0 +1,117 @@
+# @package _global_
+
+# Model
+model:
+ _target_: sam2.modeling.sam2_base.SAM2Base
+ image_encoder:
+ _target_: sam2.modeling.backbones.image_encoder.ImageEncoder
+ scalp: 1
+ trunk:
+ _target_: sam2.modeling.backbones.hieradet.Hiera
+ embed_dim: 144
+ num_heads: 2
+ stages: [2, 6, 36, 4]
+ global_att_blocks: [23, 33, 43]
+ window_pos_embed_bkg_spatial_size: [7, 7]
+ window_spec: [8, 4, 16, 8]
+ neck:
+ _target_: sam2.modeling.backbones.image_encoder.FpnNeck
+ position_encoding:
+ _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
+ num_pos_feats: 256
+ normalize: true
+ scale: null
+ temperature: 10000
+ d_model: 256
+ backbone_channel_list: [1152, 576, 288, 144]
+ fpn_top_down_levels: [2, 3] # output level 0 and 1 directly use the backbone features
+ fpn_interp_model: nearest
+
+ memory_attention:
+ _target_: sam2.modeling.memory_attention.MemoryAttention
+ d_model: 256
+ pos_enc_at_input: true
+ layer:
+ _target_: sam2.modeling.memory_attention.MemoryAttentionLayer
+ activation: relu
+ dim_feedforward: 2048
+ dropout: 0.1
+ pos_enc_at_attn: false
+ self_attention:
+ _target_: sam2.modeling.sam.transformer.RoPEAttention
+ rope_theta: 10000.0
+ feat_sizes: [32, 32]
+ embedding_dim: 256
+ num_heads: 1
+ downsample_rate: 1
+ dropout: 0.1
+ d_model: 256
+ pos_enc_at_cross_attn_keys: true
+ pos_enc_at_cross_attn_queries: false
+ cross_attention:
+ _target_: sam2.modeling.sam.transformer.RoPEAttention
+ rope_theta: 10000.0
+ feat_sizes: [32, 32]
+ rope_k_repeat: True
+ embedding_dim: 256
+ num_heads: 1
+ downsample_rate: 1
+ dropout: 0.1
+ kv_in_dim: 64
+ num_layers: 4
+
+ memory_encoder:
+ _target_: sam2.modeling.memory_encoder.MemoryEncoder
+ out_dim: 64
+ position_encoding:
+ _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
+ num_pos_feats: 64
+ normalize: true
+ scale: null
+ temperature: 10000
+ mask_downsampler:
+ _target_: sam2.modeling.memory_encoder.MaskDownSampler
+ kernel_size: 3
+ stride: 2
+ padding: 1
+ fuser:
+ _target_: sam2.modeling.memory_encoder.Fuser
+ layer:
+ _target_: sam2.modeling.memory_encoder.CXBlock
+ dim: 256
+ kernel_size: 7
+ padding: 3
+ layer_scale_init_value: 1e-6
+ use_dwconv: True # depth-wise convs
+ num_layers: 2
+
+ num_maskmem: 7
+ image_size: 1024
+ # apply scaled sigmoid on mask logits for memory encoder, and directly feed input mask as output mask
+ sigmoid_scale_for_mem_enc: 20.0
+ sigmoid_bias_for_mem_enc: -10.0
+ use_mask_input_as_output_without_sam: true
+ # Memory
+ directly_add_no_mem_embed: true
+ # use high-resolution feature map in the SAM mask decoder
+ use_high_res_features_in_sam: true
+ # output 3 masks on the first click on initial conditioning frames
+ multimask_output_in_sam: true
+ # SAM heads
+ iou_prediction_use_sigmoid: True
+ # cross-attend to object pointers from other frames (based on SAM output tokens) in the encoder
+ use_obj_ptrs_in_encoder: true
+ add_tpos_enc_to_obj_ptrs: false
+ only_obj_ptrs_in_the_past_for_eval: true
+ # object occlusion prediction
+ pred_obj_scores: true
+ pred_obj_scores_mlp: true
+ fixed_no_obj_ptr: true
+ # multimask tracking settings
+ multimask_output_for_tracking: true
+ use_multimask_token_for_obj_ptr: true
+ multimask_min_pt_num: 0
+ multimask_max_pt_num: 1
+ use_mlp_for_obj_ptr_proj: true
+ # Compilation flag
+ compile_image_encoder: False
diff --git a/projects/PCSegSAM2/sam2/configs/sam2/sam2_hiera_s.yaml b/projects/PCSegSAM2/sam2/configs/sam2/sam2_hiera_s.yaml
new file mode 100644
index 00000000..26e5d4d3
--- /dev/null
+++ b/projects/PCSegSAM2/sam2/configs/sam2/sam2_hiera_s.yaml
@@ -0,0 +1,116 @@
+# @package _global_
+
+# Model
+model:
+ _target_: sam2.modeling.sam2_base.SAM2Base
+ image_encoder:
+ _target_: sam2.modeling.backbones.image_encoder.ImageEncoder
+ scalp: 1
+ trunk:
+ _target_: sam2.modeling.backbones.hieradet.Hiera
+ embed_dim: 96
+ num_heads: 1
+ stages: [1, 2, 11, 2]
+ global_att_blocks: [7, 10, 13]
+ window_pos_embed_bkg_spatial_size: [7, 7]
+ neck:
+ _target_: sam2.modeling.backbones.image_encoder.FpnNeck
+ position_encoding:
+ _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
+ num_pos_feats: 256
+ normalize: true
+ scale: null
+ temperature: 10000
+ d_model: 256
+ backbone_channel_list: [768, 384, 192, 96]
+ fpn_top_down_levels: [2, 3] # output level 0 and 1 directly use the backbone features
+ fpn_interp_model: nearest
+
+ memory_attention:
+ _target_: sam2.modeling.memory_attention.MemoryAttention
+ d_model: 256
+ pos_enc_at_input: true
+ layer:
+ _target_: sam2.modeling.memory_attention.MemoryAttentionLayer
+ activation: relu
+ dim_feedforward: 2048
+ dropout: 0.1
+ pos_enc_at_attn: false
+ self_attention:
+ _target_: sam2.modeling.sam.transformer.RoPEAttention
+ rope_theta: 10000.0
+ feat_sizes: [32, 32]
+ embedding_dim: 256
+ num_heads: 1
+ downsample_rate: 1
+ dropout: 0.1
+ d_model: 256
+ pos_enc_at_cross_attn_keys: true
+ pos_enc_at_cross_attn_queries: false
+ cross_attention:
+ _target_: sam2.modeling.sam.transformer.RoPEAttention
+ rope_theta: 10000.0
+ feat_sizes: [32, 32]
+ rope_k_repeat: True
+ embedding_dim: 256
+ num_heads: 1
+ downsample_rate: 1
+ dropout: 0.1
+ kv_in_dim: 64
+ num_layers: 4
+
+ memory_encoder:
+ _target_: sam2.modeling.memory_encoder.MemoryEncoder
+ out_dim: 64
+ position_encoding:
+ _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
+ num_pos_feats: 64
+ normalize: true
+ scale: null
+ temperature: 10000
+ mask_downsampler:
+ _target_: sam2.modeling.memory_encoder.MaskDownSampler
+ kernel_size: 3
+ stride: 2
+ padding: 1
+ fuser:
+ _target_: sam2.modeling.memory_encoder.Fuser
+ layer:
+ _target_: sam2.modeling.memory_encoder.CXBlock
+ dim: 256
+ kernel_size: 7
+ padding: 3
+ layer_scale_init_value: 1e-6
+ use_dwconv: True # depth-wise convs
+ num_layers: 2
+
+ num_maskmem: 7
+ image_size: 1024
+ # apply scaled sigmoid on mask logits for memory encoder, and directly feed input mask as output mask
+ sigmoid_scale_for_mem_enc: 20.0
+ sigmoid_bias_for_mem_enc: -10.0
+ use_mask_input_as_output_without_sam: true
+ # Memory
+ directly_add_no_mem_embed: true
+ # use high-resolution feature map in the SAM mask decoder
+ use_high_res_features_in_sam: true
+ # output 3 masks on the first click on initial conditioning frames
+ multimask_output_in_sam: true
+ # SAM heads
+ iou_prediction_use_sigmoid: True
+ # cross-attend to object pointers from other frames (based on SAM output tokens) in the encoder
+ use_obj_ptrs_in_encoder: true
+ add_tpos_enc_to_obj_ptrs: false
+ only_obj_ptrs_in_the_past_for_eval: true
+ # object occlusion prediction
+ pred_obj_scores: true
+ pred_obj_scores_mlp: true
+ fixed_no_obj_ptr: true
+ # multimask tracking settings
+ multimask_output_for_tracking: true
+ use_multimask_token_for_obj_ptr: true
+ multimask_min_pt_num: 0
+ multimask_max_pt_num: 1
+ use_mlp_for_obj_ptr_proj: true
+ # Compilation flag
+ compile_image_encoder: False
diff --git a/projects/PCSegSAM2/sam2/configs/sam2/sam2_hiera_t.yaml b/projects/PCSegSAM2/sam2/configs/sam2/sam2_hiera_t.yaml
new file mode 100644
index 00000000..a62c903a
--- /dev/null
+++ b/projects/PCSegSAM2/sam2/configs/sam2/sam2_hiera_t.yaml
@@ -0,0 +1,118 @@
+# @package _global_
+
+# Model
+model:
+ _target_: sam2.modeling.sam2_base.SAM2Base
+ image_encoder:
+ _target_: sam2.modeling.backbones.image_encoder.ImageEncoder
+ scalp: 1
+ trunk:
+ _target_: sam2.modeling.backbones.hieradet.Hiera
+ embed_dim: 96
+ num_heads: 1
+ stages: [1, 2, 7, 2]
+ global_att_blocks: [5, 7, 9]
+ window_pos_embed_bkg_spatial_size: [7, 7]
+ neck:
+ _target_: sam2.modeling.backbones.image_encoder.FpnNeck
+ position_encoding:
+ _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
+ num_pos_feats: 256
+ normalize: true
+ scale: null
+ temperature: 10000
+ d_model: 256
+ backbone_channel_list: [768, 384, 192, 96]
+ fpn_top_down_levels: [2, 3] # output level 0 and 1 directly use the backbone features
+ fpn_interp_model: nearest
+
+ memory_attention:
+ _target_: sam2.modeling.memory_attention.MemoryAttention
+ d_model: 256
+ pos_enc_at_input: true
+ layer:
+ _target_: sam2.modeling.memory_attention.MemoryAttentionLayer
+ activation: relu
+ dim_feedforward: 2048
+ dropout: 0.1
+ pos_enc_at_attn: false
+ self_attention:
+ _target_: sam2.modeling.sam.transformer.RoPEAttention
+ rope_theta: 10000.0
+ feat_sizes: [32, 32]
+ embedding_dim: 256
+ num_heads: 1
+ downsample_rate: 1
+ dropout: 0.1
+ d_model: 256
+ pos_enc_at_cross_attn_keys: true
+ pos_enc_at_cross_attn_queries: false
+ cross_attention:
+ _target_: sam2.modeling.sam.transformer.RoPEAttention
+ rope_theta: 10000.0
+ feat_sizes: [32, 32]
+ rope_k_repeat: True
+ embedding_dim: 256
+ num_heads: 1
+ downsample_rate: 1
+ dropout: 0.1
+ kv_in_dim: 64
+ num_layers: 4
+
+ memory_encoder:
+ _target_: sam2.modeling.memory_encoder.MemoryEncoder
+ out_dim: 64
+ position_encoding:
+ _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
+ num_pos_feats: 64
+ normalize: true
+ scale: null
+ temperature: 10000
+ mask_downsampler:
+ _target_: sam2.modeling.memory_encoder.MaskDownSampler
+ kernel_size: 3
+ stride: 2
+ padding: 1
+ fuser:
+ _target_: sam2.modeling.memory_encoder.Fuser
+ layer:
+ _target_: sam2.modeling.memory_encoder.CXBlock
+ dim: 256
+ kernel_size: 7
+ padding: 3
+ layer_scale_init_value: 1e-6
+ use_dwconv: True # depth-wise convs
+ num_layers: 2
+
+ num_maskmem: 7
+ image_size: 1024
+ # apply scaled sigmoid on mask logits for memory encoder, and directly feed input mask as output mask
+ # SAM decoder
+ sigmoid_scale_for_mem_enc: 20.0
+ sigmoid_bias_for_mem_enc: -10.0
+ use_mask_input_as_output_without_sam: true
+ # Memory
+ directly_add_no_mem_embed: true
+ # use high-resolution feature map in the SAM mask decoder
+ use_high_res_features_in_sam: true
+ # output 3 masks on the first click on initial conditioning frames
+ multimask_output_in_sam: true
+ # SAM heads
+ iou_prediction_use_sigmoid: True
+ # cross-attend to object pointers from other frames (based on SAM output tokens) in the encoder
+ use_obj_ptrs_in_encoder: true
+ add_tpos_enc_to_obj_ptrs: false
+ only_obj_ptrs_in_the_past_for_eval: true
+ # object occlusion prediction
+ pred_obj_scores: true
+ pred_obj_scores_mlp: true
+ fixed_no_obj_ptr: true
+ # multimask tracking settings
+ multimask_output_for_tracking: true
+ use_multimask_token_for_obj_ptr: true
+ multimask_min_pt_num: 0
+ multimask_max_pt_num: 1
+ use_mlp_for_obj_ptr_proj: true
+ # Compilation flag
+ # HieraT does not currently support compilation, should always be set to False
+ compile_image_encoder: False
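Comparing these original sam2 configs against the sam2.1 configs added earlier in this PR, the model sections are identical apart from a handful of flags; a summary of the delta, compiled by eye from the YAML rather than generated:

```python
# Settings that appear (or change value) in the sam2.1 configs relative to the
# sam2 configs in this PR.
SAM21_CONFIG_DELTA = {
    "no_obj_embed_spatial": True,             # new in sam2.1
    "add_tpos_enc_to_obj_ptrs": True,         # false in the sam2 configs
    "proj_tpos_enc_in_obj_ptrs": True,        # new in sam2.1
    "use_signed_tpos_enc_to_obj_ptrs": True,  # new in sam2.1
}
```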
diff --git a/projects/PCSegSAM2/sam2/csrc/connected_components.cu b/projects/PCSegSAM2/sam2/csrc/connected_components.cu
new file mode 100644
index 00000000..ced21eb3
--- /dev/null
+++ b/projects/PCSegSAM2/sam2/csrc/connected_components.cu
@@ -0,0 +1,289 @@
+// Copyright (c) Meta Platforms, Inc. and affiliates.
+// All rights reserved.
+
+// This source code is licensed under the license found in the
+// LICENSE file in the root directory of this source tree.
+
+// adapted from https://github.com/zsef123/Connected_components_PyTorch
+// with license found in the LICENSE_cctorch file in the root directory.
+#include <ATen/cuda/CUDAContext.h>
+#include <cuda.h>
+#include <cuda_runtime.h>
+#include <torch/extension.h>
+#include <torch/script.h>
+#include <vector>
+
+// 2d
+#define BLOCK_ROWS 16
+#define BLOCK_COLS 16
+
+namespace cc2d {
+
+template <typename T>
+__device__ __forceinline__ unsigned char hasBit(T bitmap, unsigned char pos) {
+ return (bitmap >> pos) & 1;
+}
+
+__device__ int32_t find(const int32_t* s_buf, int32_t n) {
+ while (s_buf[n] != n)
+ n = s_buf[n];
+ return n;
+}
+
+__device__ int32_t find_n_compress(int32_t* s_buf, int32_t n) {
+ const int32_t id = n;
+ while (s_buf[n] != n) {
+ n = s_buf[n];
+ s_buf[id] = n;
+ }
+ return n;
+}
+
+__device__ void union_(int32_t* s_buf, int32_t a, int32_t b) {
+ bool done;
+ do {
+ a = find(s_buf, a);
+ b = find(s_buf, b);
+
+ if (a < b) {
+ int32_t old = atomicMin(s_buf + b, a);
+ done = (old == b);
+ b = old;
+ } else if (b < a) {
+ int32_t old = atomicMin(s_buf + a, b);
+ done = (old == a);
+ a = old;
+ } else
+ done = true;
+
+ } while (!done);
+}
+
+__global__ void
+init_labeling(int32_t* label, const uint32_t W, const uint32_t H) {
+ const uint32_t row = (blockIdx.y * blockDim.y + threadIdx.y) * 2;
+ const uint32_t col = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
+ const uint32_t idx = row * W + col;
+
+ if (row < H && col < W)
+ label[idx] = idx;
+}
+
+__global__ void
+merge(uint8_t* img, int32_t* label, const uint32_t W, const uint32_t H) {
+ const uint32_t row = (blockIdx.y * blockDim.y + threadIdx.y) * 2;
+ const uint32_t col = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
+ const uint32_t idx = row * W + col;
+
+ if (row >= H || col >= W)
+ return;
+
+ uint32_t P = 0;
+
+ if (img[idx])
+ P |= 0x777;
+ if (row + 1 < H && img[idx + W])
+ P |= 0x777 << 4;
+ if (col + 1 < W && img[idx + 1])
+ P |= 0x777 << 1;
+
+ if (col == 0)
+ P &= 0xEEEE;
+ if (col + 1 >= W)
+ P &= 0x3333;
+ else if (col + 2 >= W)
+ P &= 0x7777;
+
+ if (row == 0)
+ P &= 0xFFF0;
+ if (row + 1 >= H)
+ P &= 0xFF;
+
+ if (P > 0) {
+ // If the first bit of P is set, the top-left pixel needs checking; union
+ // with the top-left block when that pixel is foreground
+ if (hasBit(P, 0) && img[idx - W - 1]) {
+ union_(label, idx, idx - 2 * W - 2); // top left block
+ }
+
+ if ((hasBit(P, 1) && img[idx - W]) || (hasBit(P, 2) && img[idx - W + 1]))
+ union_(label, idx, idx - 2 * W); // top bottom block
+
+ if (hasBit(P, 3) && img[idx + 2 - W])
+ union_(label, idx, idx - 2 * W + 2); // top right block
+
+ if ((hasBit(P, 4) && img[idx - 1]) || (hasBit(P, 8) && img[idx + W - 1]))
+ union_(label, idx, idx - 2); // just left block
+ }
+}
+
+__global__ void compression(int32_t* label, const int32_t W, const int32_t H) {
+ const uint32_t row = (blockIdx.y * blockDim.y + threadIdx.y) * 2;
+ const uint32_t col = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
+ const uint32_t idx = row * W + col;
+
+ if (row < H && col < W)
+ find_n_compress(label, idx);
+}
+
+__global__ void final_labeling(
+ const uint8_t* img,
+ int32_t* label,
+ const int32_t W,
+ const int32_t H) {
+ const uint32_t row = (blockIdx.y * blockDim.y + threadIdx.y) * 2;
+ const uint32_t col = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
+ const uint32_t idx = row * W + col;
+
+ if (row >= H || col >= W)
+ return;
+
+ int32_t y = label[idx] + 1;
+
+ if (img[idx])
+ label[idx] = y;
+ else
+ label[idx] = 0;
+
+ if (col + 1 < W) {
+ if (img[idx + 1])
+ label[idx + 1] = y;
+ else
+ label[idx + 1] = 0;
+
+ if (row + 1 < H) {
+ if (img[idx + W + 1])
+ label[idx + W + 1] = y;
+ else
+ label[idx + W + 1] = 0;
+ }
+ }
+
+ if (row + 1 < H) {
+ if (img[idx + W])
+ label[idx + W] = y;
+ else
+ label[idx + W] = 0;
+ }
+}
+
+__global__ void init_counting(
+ const int32_t* label,
+ int32_t* count_init,
+ const int32_t W,
+ const int32_t H) {
+ const uint32_t row = (blockIdx.y * blockDim.y + threadIdx.y);
+ const uint32_t col = (blockIdx.x * blockDim.x + threadIdx.x);
+ const uint32_t idx = row * W + col;
+
+ if (row >= H || col >= W)
+ return;
+
+ int32_t y = label[idx];
+ if (y > 0) {
+ int32_t count_idx = y - 1;
+ atomicAdd(count_init + count_idx, 1);
+ }
+}
+
+__global__ void final_counting(
+ const int32_t* label,
+ const int32_t* count_init,
+ int32_t* count_final,
+ const int32_t W,
+ const int32_t H) {
+ const uint32_t row = (blockIdx.y * blockDim.y + threadIdx.y);
+ const uint32_t col = (blockIdx.x * blockDim.x + threadIdx.x);
+ const uint32_t idx = row * W + col;
+
+ if (row >= H || col >= W)
+ return;
+
+ int32_t y = label[idx];
+ if (y > 0) {
+ int32_t count_idx = y - 1;
+ count_final[idx] = count_init[count_idx];
+ } else {
+ count_final[idx] = 0;
+ }
+}
+
+} // namespace cc2d
+
+std::vector<torch::Tensor> get_connected_componnets(
+ const torch::Tensor& inputs) {
+ AT_ASSERTM(inputs.is_cuda(), "inputs must be a CUDA tensor");
+ AT_ASSERTM(inputs.ndimension() == 4, "inputs must be [N, 1, H, W] shape");
+ AT_ASSERTM(
+ inputs.scalar_type() == torch::kUInt8, "inputs must be a uint8 type");
+
+ const uint32_t N = inputs.size(0);
+ const uint32_t C = inputs.size(1);
+ const uint32_t H = inputs.size(2);
+ const uint32_t W = inputs.size(3);
+
+ AT_ASSERTM(C == 1, "inputs must be [N, 1, H, W] shape");
+ AT_ASSERTM((H % 2) == 0, "height must be an even number");
+ AT_ASSERTM((W % 2) == 0, "width must be an even number");
+
+  // labels are stored as int32
+ auto label_options =
+ torch::TensorOptions().dtype(torch::kInt32).device(inputs.device());
+ torch::Tensor labels = torch::zeros({N, C, H, W}, label_options);
+ torch::Tensor counts_init = torch::zeros({N, C, H, W}, label_options);
+ torch::Tensor counts_final = torch::zeros({N, C, H, W}, label_options);
+
+ dim3 grid = dim3(
+ ((W + 1) / 2 + BLOCK_COLS - 1) / BLOCK_COLS,
+ ((H + 1) / 2 + BLOCK_ROWS - 1) / BLOCK_ROWS);
+ dim3 block = dim3(BLOCK_COLS, BLOCK_ROWS);
+ dim3 grid_count =
+ dim3((W + BLOCK_COLS) / BLOCK_COLS, (H + BLOCK_ROWS) / BLOCK_ROWS);
+ dim3 block_count = dim3(BLOCK_COLS, BLOCK_ROWS);
+ cudaStream_t stream = at::cuda::getCurrentCUDAStream();
+
+ for (int n = 0; n < N; n++) {
+ uint32_t offset = n * H * W;
+
+    cc2d::init_labeling<<<grid, block, 0, stream>>>(
+        labels.data_ptr<int32_t>() + offset, W, H);
+    cc2d::merge<<<grid, block, 0, stream>>>(
+        inputs.data_ptr<uint8_t>() + offset,
+        labels.data_ptr<int32_t>() + offset,
+        W,
+        H);
+    cc2d::compression<<<grid, block, 0, stream>>>(
+        labels.data_ptr<int32_t>() + offset, W, H);
+    cc2d::final_labeling<<<grid, block, 0, stream>>>(
+        inputs.data_ptr<uint8_t>() + offset,
+        labels.data_ptr<int32_t>() + offset,
+        W,
+        H);
+
+    // get the count of pixels in each connected component
+    cc2d::init_counting<<<grid_count, block_count, 0, stream>>>(
+        labels.data_ptr<int32_t>() + offset,
+        counts_init.data_ptr<int32_t>() + offset,
+        W,
+        H);
+    cc2d::final_counting<<<grid_count, block_count, 0, stream>>>(
+        labels.data_ptr<int32_t>() + offset,
+        counts_init.data_ptr<int32_t>() + offset,
+        counts_final.data_ptr<int32_t>() + offset,
+        W,
+        H);
+ }
+
+ // returned values are [labels, counts]
+  std::vector<torch::Tensor> outputs;
+ outputs.push_back(labels);
+ outputs.push_back(counts_final);
+ return outputs;
+}
+
+PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
+ m.def(
+ "get_connected_componnets",
+ &get_connected_componnets,
+ "get_connected_componnets");
+}
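A minimal usage sketch for the kernels above, assuming the file is compiled into a torch extension importable as `sam2._C` (the module name is an assumption; the exported function keeps the upstream spelling `get_connected_componnets`):

```python
# Hypothetical usage; requires a CUDA device and the built extension.
import torch
from sam2 import _C  # assumption: extension registered under this name

# Binary mask batch [N, 1, H, W]: uint8, on CUDA, with even H and W.
mask = (torch.rand(1, 1, 256, 256, device="cuda") > 0.5).to(torch.uint8)

labels, counts = _C.get_connected_componnets(mask)
# `labels`: per-pixel component id (0 = background), int32.
# `counts`: size of the component each pixel belongs to (0 for background).
print(labels.shape, counts.shape)  # torch.Size([1, 1, 256, 256]) twice
```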
diff --git a/projects/PCSegSAM2/sam2/modeling/__init__.py b/projects/PCSegSAM2/sam2/modeling/__init__.py
new file mode 100644
index 00000000..5277f461
--- /dev/null
+++ b/projects/PCSegSAM2/sam2/modeling/__init__.py
@@ -0,0 +1,5 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
diff --git a/projects/PCSegSAM2/sam2/modeling/backbones/__init__.py b/projects/PCSegSAM2/sam2/modeling/backbones/__init__.py
new file mode 100644
index 00000000..5277f461
--- /dev/null
+++ b/projects/PCSegSAM2/sam2/modeling/backbones/__init__.py
@@ -0,0 +1,5 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
diff --git a/projects/PCSegSAM2/sam2/modeling/backbones/hieradet.py b/projects/PCSegSAM2/sam2/modeling/backbones/hieradet.py
new file mode 100644
index 00000000..27e6589b
--- /dev/null
+++ b/projects/PCSegSAM2/sam2/modeling/backbones/hieradet.py
@@ -0,0 +1,303 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+import logging
+from functools import partial
+from typing import List, Tuple, Union
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from iopath.common.file_io import g_pathmgr
+from sam2.modeling.backbones.utils import (
+ PatchEmbed,
+ window_partition,
+ window_unpartition,
+)
+from sam2.modeling.sam2_utils import MLP, DropPath
+
+
+def do_pool(x: torch.Tensor, pool: nn.Module, norm: nn.Module = None) -> torch.Tensor:
+ if pool is None:
+ return x
+ # (B, H, W, C) -> (B, C, H, W)
+ x = x.permute(0, 3, 1, 2)
+ x = pool(x)
+ # (B, C, H', W') -> (B, H', W', C)
+ x = x.permute(0, 2, 3, 1)
+ if norm:
+ x = norm(x)
+
+ return x
+
+
+class MultiScaleAttention(nn.Module):
+ def __init__(
+ self,
+ dim: int,
+ dim_out: int,
+ num_heads: int,
+ q_pool: nn.Module = None,
+ ):
+ super().__init__()
+
+ self.dim = dim
+ self.dim_out = dim_out
+ self.num_heads = num_heads
+ self.q_pool = q_pool
+ self.qkv = nn.Linear(dim, dim_out * 3)
+ self.proj = nn.Linear(dim_out, dim_out)
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ B, H, W, _ = x.shape
+ # qkv with shape (B, H * W, 3, nHead, C)
+ qkv = self.qkv(x).reshape(B, H * W, 3, self.num_heads, -1)
+ # q, k, v with shape (B, H * W, nheads, C)
+ q, k, v = torch.unbind(qkv, 2)
+
+ # Q pooling (for downsample at stage changes)
+ if self.q_pool:
+ q = do_pool(q.reshape(B, H, W, -1), self.q_pool)
+ H, W = q.shape[1:3] # downsampled shape
+ q = q.reshape(B, H * W, self.num_heads, -1)
+
+ # Torch's SDPA expects [B, nheads, H*W, C] so we transpose
+ x = F.scaled_dot_product_attention(
+ q.transpose(1, 2),
+ k.transpose(1, 2),
+ v.transpose(1, 2),
+ )
+ # Transpose back
+ x = x.transpose(1, 2)
+ x = x.reshape(B, H, W, -1)
+
+ x = self.proj(x)
+
+ return x
+
+
+class MultiScaleBlock(nn.Module):
+ def __init__(
+ self,
+ dim: int,
+ dim_out: int,
+ num_heads: int,
+ mlp_ratio: float = 4.0,
+ drop_path: float = 0.0,
+ norm_layer: Union[nn.Module, str] = "LayerNorm",
+ q_stride: Tuple[int, int] = None,
+ act_layer: nn.Module = nn.GELU,
+ window_size: int = 0,
+ ):
+ super().__init__()
+
+ if isinstance(norm_layer, str):
+ norm_layer = partial(getattr(nn, norm_layer), eps=1e-6)
+
+ self.dim = dim
+ self.dim_out = dim_out
+ self.norm1 = norm_layer(dim)
+
+ self.window_size = window_size
+
+ self.pool, self.q_stride = None, q_stride
+ if self.q_stride:
+ self.pool = nn.MaxPool2d(kernel_size=q_stride, stride=q_stride, ceil_mode=False)
+
+ self.attn = MultiScaleAttention(
+ dim,
+ dim_out,
+ num_heads=num_heads,
+ q_pool=self.pool,
+ )
+ self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
+
+ self.norm2 = norm_layer(dim_out)
+ self.mlp = MLP(
+ dim_out,
+ int(dim_out * mlp_ratio),
+ dim_out,
+ num_layers=2,
+ activation=act_layer,
+ )
+
+ if dim != dim_out:
+ self.proj = nn.Linear(dim, dim_out)
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ shortcut = x # B, H, W, C
+ x = self.norm1(x)
+
+ # Skip connection
+ if self.dim != self.dim_out:
+ shortcut = do_pool(self.proj(x), self.pool)
+
+ # Window partition
+ window_size = self.window_size
+ if window_size > 0:
+ H, W = x.shape[1], x.shape[2]
+ x, pad_hw = window_partition(x, window_size)
+
+ # Window Attention + Q Pooling (if stage change)
+ x = self.attn(x)
+ if self.q_stride:
+ # Shapes have changed due to Q pooling
+ window_size = self.window_size // self.q_stride[0]
+ H, W = shortcut.shape[1:3]
+
+ pad_h = (window_size - H % window_size) % window_size
+ pad_w = (window_size - W % window_size) % window_size
+ pad_hw = (H + pad_h, W + pad_w)
+
+ # Reverse window partition
+ if self.window_size > 0:
+ x = window_unpartition(x, window_size, pad_hw, (H, W))
+
+ x = shortcut + self.drop_path(x)
+ # MLP
+ x = x + self.drop_path(self.mlp(self.norm2(x)))
+ return x
+
+
+class Hiera(nn.Module):
+ """
+ Reference: https://arxiv.org/abs/2306.00989
+ """
+
+ def __init__(
+ self,
+ embed_dim: int = 96, # initial embed dim
+ num_heads: int = 1, # initial number of heads
+ drop_path_rate: float = 0.0, # stochastic depth
+ q_pool: int = 3, # number of q_pool stages
+ q_stride: Tuple[int, int] = (2, 2), # downsample stride bet. stages
+ stages: Tuple[int, ...] = (2, 3, 16, 3), # blocks per stage
+ dim_mul: float = 2.0, # dim_mul factor at stage shift
+ head_mul: float = 2.0, # head_mul factor at stage shift
+ window_pos_embed_bkg_spatial_size: Tuple[int, int] = (14, 14),
+ # window size per stage, when not using global att.
+ window_spec: Tuple[int, ...] = (
+ 8,
+ 4,
+ 14,
+ 7,
+ ),
+ # global attn in these blocks
+ global_att_blocks: Tuple[int, ...] = (
+ 12,
+ 16,
+ 20,
+ ),
+ weights_path=None,
+ return_interm_layers=True, # return feats from every stage
+ ):
+ super().__init__()
+
+ assert len(stages) == len(window_spec)
+ self.window_spec = window_spec
+
+ depth = sum(stages)
+ self.q_stride = q_stride
+ self.stage_ends = [sum(stages[:i]) - 1 for i in range(1, len(stages) + 1)]
+ assert 0 <= q_pool <= len(self.stage_ends[:-1])
+ self.q_pool_blocks = [x + 1 for x in self.stage_ends[:-1]][:q_pool]
+ self.return_interm_layers = return_interm_layers
+
+ self.patch_embed = PatchEmbed(
+ embed_dim=embed_dim,
+ )
+ # Which blocks have global att?
+ self.global_att_blocks = global_att_blocks
+
+ # Windowed positional embedding (https://arxiv.org/abs/2311.05613)
+ self.window_pos_embed_bkg_spatial_size = window_pos_embed_bkg_spatial_size
+ self.pos_embed = nn.Parameter(torch.zeros(1, embed_dim, *self.window_pos_embed_bkg_spatial_size))
+ self.pos_embed_window = nn.Parameter(torch.zeros(1, embed_dim, self.window_spec[0], self.window_spec[0]))
+
+ dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
+
+ cur_stage = 1
+ self.blocks = nn.ModuleList()
+
+ for i in range(depth):
+ dim_out = embed_dim
+            # window size lags the stage change by one block: the first
+            # block of a new stage still uses the previous stage's window
+            # size, later blocks use the current stage's window size
+ window_size = self.window_spec[cur_stage - 1]
+
+ if self.global_att_blocks is not None:
+ window_size = 0 if i in self.global_att_blocks else window_size
+
+ if i - 1 in self.stage_ends:
+ dim_out = int(embed_dim * dim_mul)
+ num_heads = int(num_heads * head_mul)
+ cur_stage += 1
+
+ block = MultiScaleBlock(
+ dim=embed_dim,
+ dim_out=dim_out,
+ num_heads=num_heads,
+ drop_path=dpr[i],
+ q_stride=self.q_stride if i in self.q_pool_blocks else None,
+ window_size=window_size,
+ )
+
+ embed_dim = dim_out
+ self.blocks.append(block)
+
+ self.channel_list = (
+ [self.blocks[i].dim_out for i in self.stage_ends[::-1]]
+ if return_interm_layers
+ else [self.blocks[-1].dim_out]
+ )
+
+ if weights_path is not None:
+ with g_pathmgr.open(weights_path, "rb") as f:
+ chkpt = torch.load(f, map_location="cpu")
+            logging.info("loading Hiera: %s", self.load_state_dict(chkpt, strict=False))
+
+ def _get_pos_embed(self, hw: Tuple[int, int]) -> torch.Tensor:
+ h, w = hw
+ window_embed = self.pos_embed_window
+ pos_embed = F.interpolate(self.pos_embed, size=(h, w), mode="bicubic")
+ pos_embed = pos_embed + window_embed.tile([x // y for x, y in zip(pos_embed.shape, window_embed.shape)])
+ pos_embed = pos_embed.permute(0, 2, 3, 1)
+ return pos_embed
+
+ def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
+ x = self.patch_embed(x)
+ # x: (B, H, W, C)
+
+ # Add pos embed
+ x = x + self._get_pos_embed(x.shape[1:3])
+
+ outputs = []
+ for i, blk in enumerate(self.blocks):
+ x = blk(x)
+ if (i == self.stage_ends[-1]) or (i in self.stage_ends and self.return_interm_layers):
+ feats = x.permute(0, 3, 1, 2)
+ outputs.append(feats)
+
+ return outputs
+
+ def get_layer_id(self, layer_name):
+ # https://github.com/microsoft/unilm/blob/master/beit/optim_factory.py#L33
+ num_layers = self.get_num_layers()
+
+ if layer_name.find("rel_pos") != -1:
+ return num_layers + 1
+ elif layer_name.find("pos_embed") != -1:
+ return 0
+ elif layer_name.find("patch_embed") != -1:
+ return 0
+ elif layer_name.find("blocks") != -1:
+ return int(layer_name.split("blocks")[1].split(".")[1]) + 1
+ else:
+ return num_layers + 1
+
+ def get_num_layers(self) -> int:
+ return len(self.blocks)
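A short shape sketch for the Hiera trunk above, assuming the `sam2` package in this diff is importable; the printed sizes follow from the code (patch stride 4, then a 2x downsample at each stage change), not from a run of any shipped checkpoint:

```python
import torch
from sam2.modeling.backbones.hieradet import Hiera

trunk = Hiera(embed_dim=96, num_heads=1)  # defaults: stages (2, 3, 16, 3)
x = torch.randn(1, 3, 256, 256)
feats = trunk(x)  # one feature map per stage (return_interm_layers=True)
for f in feats:
    print(f.shape)
# [1, 96, 64, 64], [1, 192, 32, 32], [1, 384, 16, 16], [1, 768, 8, 8]
```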
diff --git a/projects/PCSegSAM2/sam2/modeling/backbones/image_encoder.py b/projects/PCSegSAM2/sam2/modeling/backbones/image_encoder.py
new file mode 100644
index 00000000..ebb75370
--- /dev/null
+++ b/projects/PCSegSAM2/sam2/modeling/backbones/image_encoder.py
@@ -0,0 +1,132 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+from typing import List, Optional
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+
+class ImageEncoder(nn.Module):
+ def __init__(
+ self,
+ trunk: nn.Module,
+ neck: nn.Module,
+ scalp: int = 0,
+ ):
+ super().__init__()
+ self.trunk = trunk
+ self.neck = neck
+ self.scalp = scalp
+ assert (
+ self.trunk.channel_list == self.neck.backbone_channel_list
+ ), f"Channel dims of trunk and neck do not match. Trunk: {self.trunk.channel_list}, neck: {self.neck.backbone_channel_list}"
+
+ def forward(self, sample: torch.Tensor):
+ # Forward through backbone
+ features, pos = self.neck(self.trunk(sample))
+ if self.scalp > 0:
+ # Discard the lowest resolution features
+ features, pos = features[: -self.scalp], pos[: -self.scalp]
+
+ src = features[-1]
+ output = {
+ "vision_features": src,
+ "vision_pos_enc": pos,
+ "backbone_fpn": features,
+ }
+ return output
+
+
+class FpnNeck(nn.Module):
+ """
+ A modified variant of Feature Pyramid Network (FPN) neck
+ (we remove output conv and also do bicubic interpolation similar to ViT
+ pos embed interpolation)
+ """
+
+ def __init__(
+ self,
+ position_encoding: nn.Module,
+ d_model: int,
+ backbone_channel_list: List[int],
+ kernel_size: int = 1,
+ stride: int = 1,
+ padding: int = 0,
+ fpn_interp_model: str = "bilinear",
+ fuse_type: str = "sum",
+ fpn_top_down_levels: Optional[List[int]] = None,
+ ):
+        """Initialize the neck
+        :param position_encoding: the positional encoding to use
+        :param d_model: the dimension of the model
+        :param backbone_channel_list: channel dims of the trunk stages, from lowest to highest resolution
+        :param fpn_top_down_levels: the levels that receive top-down features (defaults to all)
+        """
+ super().__init__()
+ self.position_encoding = position_encoding
+ self.convs = nn.ModuleList()
+ self.backbone_channel_list = backbone_channel_list
+ self.d_model = d_model
+ for dim in backbone_channel_list:
+ current = nn.Sequential()
+ current.add_module(
+ "conv",
+ nn.Conv2d(
+ in_channels=dim,
+ out_channels=d_model,
+ kernel_size=kernel_size,
+ stride=stride,
+ padding=padding,
+ ),
+ )
+
+ self.convs.append(current)
+ self.fpn_interp_model = fpn_interp_model
+ assert fuse_type in ["sum", "avg"]
+ self.fuse_type = fuse_type
+
+ # levels to have top-down features in its outputs
+ # e.g. if fpn_top_down_levels is [2, 3], then only outputs of level 2 and 3
+ # have top-down propagation, while outputs of level 0 and level 1 have only
+ # lateral features from the same backbone level.
+ if fpn_top_down_levels is None:
+ # default is to have top-down features on all levels
+ fpn_top_down_levels = range(len(self.convs))
+ self.fpn_top_down_levels = list(fpn_top_down_levels)
+
+ def forward(self, xs: List[torch.Tensor]):
+
+ out = [None] * len(self.convs)
+ pos = [None] * len(self.convs)
+ assert len(xs) == len(self.convs)
+ # fpn forward pass
+ # see https://github.com/facebookresearch/detectron2/blob/main/detectron2/modeling/backbone/fpn.py
+ prev_features = None
+ # forward in top-down order (from low to high resolution)
+ n = len(self.convs) - 1
+ for i in range(n, -1, -1):
+ x = xs[i]
+ lateral_features = self.convs[n - i](x)
+ if i in self.fpn_top_down_levels and prev_features is not None:
+ top_down_features = F.interpolate(
+ prev_features.to(dtype=torch.float32),
+ scale_factor=2.0,
+ mode=self.fpn_interp_model,
+ align_corners=(None if self.fpn_interp_model == "nearest" else False),
+ antialias=False,
+ )
+ prev_features = lateral_features + top_down_features
+ if self.fuse_type == "avg":
+ prev_features /= 2
+ else:
+ prev_features = lateral_features
+ x_out = prev_features
+ out[i] = x_out
+ pos[i] = self.position_encoding(x_out).to(x_out.dtype)
+
+ return out, pos
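A sketch of composing trunk and neck into an `ImageEncoder`, mirroring how these classes fit together; the hyperparameters below are illustrative assumptions, not the shipped configuration:

```python
import torch
from sam2.modeling.backbones.hieradet import Hiera
from sam2.modeling.backbones.image_encoder import FpnNeck, ImageEncoder
from sam2.modeling.position_encoding import PositionEmbeddingSine

trunk = Hiera(embed_dim=96)
neck = FpnNeck(
    position_encoding=PositionEmbeddingSine(num_pos_feats=256),
    d_model=256,
    backbone_channel_list=[768, 384, 192, 96],  # must match trunk.channel_list
    fpn_top_down_levels=[2, 3],  # only the two finest kept levels get top-down
)
encoder = ImageEncoder(trunk=trunk, neck=neck, scalp=1)

out = encoder(torch.randn(1, 3, 256, 256))
print(out["vision_features"].shape)  # [1, 256, 16, 16] (lowest kept level)
print(len(out["backbone_fpn"]))      # 3: scalp=1 discards the coarsest level
```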
diff --git a/projects/PCSegSAM2/sam2/modeling/backbones/utils.py b/projects/PCSegSAM2/sam2/modeling/backbones/utils.py
new file mode 100644
index 00000000..620f312f
--- /dev/null
+++ b/projects/PCSegSAM2/sam2/modeling/backbones/utils.py
@@ -0,0 +1,89 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+"""Some utilities for backbones, in particular for windowing"""
+
+from typing import Tuple
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+
+def window_partition(x, window_size):
+ """
+ Partition into non-overlapping windows with padding if needed.
+ Args:
+ x (tensor): input tokens with [B, H, W, C].
+ window_size (int): window size.
+ Returns:
+ windows: windows after partition with [B * num_windows, window_size, window_size, C].
+ (Hp, Wp): padded height and width before partition
+ """
+ B, H, W, C = x.shape
+
+ pad_h = (window_size - H % window_size) % window_size
+ pad_w = (window_size - W % window_size) % window_size
+ if pad_h > 0 or pad_w > 0:
+ x = F.pad(x, (0, 0, 0, pad_w, 0, pad_h))
+ Hp, Wp = H + pad_h, W + pad_w
+
+ x = x.view(B, Hp // window_size, window_size, Wp // window_size, window_size, C)
+ windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
+ return windows, (Hp, Wp)
+
+
+def window_unpartition(windows, window_size, pad_hw, hw):
+ """
+ Window unpartition into original sequences and removing padding.
+ Args:
+        windows (tensor): input tokens with [B * num_windows, window_size, window_size, C].
+ window_size (int): window size.
+ pad_hw (Tuple): padded height and width (Hp, Wp).
+ hw (Tuple): original height and width (H, W) before padding.
+ Returns:
+ x: unpartitioned sequences with [B, H, W, C].
+ """
+ Hp, Wp = pad_hw
+ H, W = hw
+ B = windows.shape[0] // (Hp * Wp // window_size // window_size)
+ x = windows.view(B, Hp // window_size, Wp // window_size, window_size, window_size, -1)
+ x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, Hp, Wp, -1)
+
+ if Hp > H or Wp > W:
+ x = x[:, :H, :W, :].contiguous()
+ return x
+
+
+class PatchEmbed(nn.Module):
+ """
+ Image to Patch Embedding.
+ """
+
+ def __init__(
+ self,
+ kernel_size: Tuple[int, ...] = (7, 7),
+ stride: Tuple[int, ...] = (4, 4),
+ padding: Tuple[int, ...] = (3, 3),
+ in_chans: int = 3,
+ embed_dim: int = 768,
+ ):
+ """
+ Args:
+ kernel_size (Tuple): kernel size of the projection layer.
+ stride (Tuple): stride of the projection layer.
+ padding (Tuple): padding size of the projection layer.
+ in_chans (int): Number of input image channels.
+            embed_dim (int): Patch embedding dimension.
+ """
+ super().__init__()
+ self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=kernel_size, stride=stride, padding=padding)
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ x = self.proj(x)
+ # B C H W -> B H W C
+ x = x.permute(0, 2, 3, 1)
+ return x
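A round-trip sanity sketch for the windowing helpers above: `window_partition` zero-pads up to a multiple of the window size, and `window_unpartition` crops the padding back off exactly.

```python
import torch
from sam2.modeling.backbones.utils import window_partition, window_unpartition

x = torch.randn(2, 30, 30, 96)               # 30 is not divisible by 14
windows, (Hp, Wp) = window_partition(x, 14)  # zero-pads to 42 x 42
print(windows.shape, (Hp, Wp))               # [18, 14, 14, 96] (42, 42)

y = window_unpartition(windows, 14, (Hp, Wp), (30, 30))
assert torch.equal(x, y)  # padding is stripped exactly
```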
diff --git a/projects/PCSegSAM2/sam2/modeling/memory_attention.py b/projects/PCSegSAM2/sam2/modeling/memory_attention.py
new file mode 100644
index 00000000..cb8663d4
--- /dev/null
+++ b/projects/PCSegSAM2/sam2/modeling/memory_attention.py
@@ -0,0 +1,165 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+from typing import Optional
+
+import torch
+from sam2.modeling.sam2_utils import get_activation_fn, get_clones
+from sam2.modeling.sam.transformer import RoPEAttention
+from torch import Tensor, nn
+
+
+class MemoryAttentionLayer(nn.Module):
+
+ def __init__(
+ self,
+ activation: str,
+ cross_attention: nn.Module,
+ d_model: int,
+ dim_feedforward: int,
+ dropout: float,
+ pos_enc_at_attn: bool,
+ pos_enc_at_cross_attn_keys: bool,
+ pos_enc_at_cross_attn_queries: bool,
+ self_attention: nn.Module,
+ ):
+ super().__init__()
+ self.d_model = d_model
+ self.dim_feedforward = dim_feedforward
+ self.dropout_value = dropout
+ self.self_attn = self_attention
+ self.cross_attn_image = cross_attention
+
+ # Implementation of Feedforward model
+ self.linear1 = nn.Linear(d_model, dim_feedforward)
+ self.dropout = nn.Dropout(dropout)
+ self.linear2 = nn.Linear(dim_feedforward, d_model)
+
+ self.norm1 = nn.LayerNorm(d_model)
+ self.norm2 = nn.LayerNorm(d_model)
+ self.norm3 = nn.LayerNorm(d_model)
+ self.dropout1 = nn.Dropout(dropout)
+ self.dropout2 = nn.Dropout(dropout)
+ self.dropout3 = nn.Dropout(dropout)
+
+ self.activation_str = activation
+ self.activation = get_activation_fn(activation)
+
+ # Where to add pos enc
+ self.pos_enc_at_attn = pos_enc_at_attn
+ self.pos_enc_at_cross_attn_queries = pos_enc_at_cross_attn_queries
+ self.pos_enc_at_cross_attn_keys = pos_enc_at_cross_attn_keys
+
+ def _forward_sa(self, tgt, query_pos):
+ # Self-Attention
+ tgt2 = self.norm1(tgt)
+ q = k = tgt2 + query_pos if self.pos_enc_at_attn else tgt2
+ tgt2 = self.self_attn(q, k, v=tgt2)
+ tgt = tgt + self.dropout1(tgt2)
+ return tgt
+
+ def _forward_ca(self, tgt, memory, query_pos, pos, num_k_exclude_rope=0):
+ kwds = {}
+ if num_k_exclude_rope > 0:
+ assert isinstance(self.cross_attn_image, RoPEAttention)
+ kwds = {"num_k_exclude_rope": num_k_exclude_rope}
+
+ # Cross-Attention
+ tgt2 = self.norm2(tgt)
+ tgt2 = self.cross_attn_image(
+ q=tgt2 + query_pos if self.pos_enc_at_cross_attn_queries else tgt2,
+ k=memory + pos if self.pos_enc_at_cross_attn_keys else memory,
+ v=memory,
+ **kwds,
+ )
+ tgt = tgt + self.dropout2(tgt2)
+ return tgt
+
+ def forward(
+ self,
+ tgt,
+ memory,
+ pos: Optional[Tensor] = None,
+ query_pos: Optional[Tensor] = None,
+ num_k_exclude_rope: int = 0,
+ ) -> torch.Tensor:
+
+ # Self-Attn, Cross-Attn
+ tgt = self._forward_sa(tgt, query_pos)
+ tgt = self._forward_ca(tgt, memory, query_pos, pos, num_k_exclude_rope)
+ # MLP
+ tgt2 = self.norm3(tgt)
+ tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))
+ tgt = tgt + self.dropout3(tgt2)
+ return tgt
+
+
+class MemoryAttention(nn.Module):
+ def __init__(
+ self,
+ d_model: int,
+ pos_enc_at_input: bool,
+ layer: nn.Module,
+ num_layers: int,
+ batch_first: bool = True, # Do layers expect batch first input?
+ ):
+ super().__init__()
+ self.d_model = d_model
+ self.layers = get_clones(layer, num_layers)
+ self.num_layers = num_layers
+ self.norm = nn.LayerNorm(d_model)
+ self.pos_enc_at_input = pos_enc_at_input
+ self.batch_first = batch_first
+
+ def forward(
+ self,
+ curr: torch.Tensor, # self-attention inputs
+ memory: torch.Tensor, # cross-attention inputs
+ curr_pos: Optional[Tensor] = None, # pos_enc for self-attention inputs
+ memory_pos: Optional[Tensor] = None, # pos_enc for cross-attention inputs
+ num_obj_ptr_tokens: int = 0, # number of object pointer *tokens*
+ ):
+ if isinstance(curr, list):
+ assert isinstance(curr_pos, list)
+ assert len(curr) == len(curr_pos) == 1
+ curr, curr_pos = (
+ curr[0],
+ curr_pos[0],
+ )
+
+ assert curr.shape[1] == memory.shape[1], "Batch size must be the same for curr and memory"
+
+ output = curr
+ if self.pos_enc_at_input and curr_pos is not None:
+ output = output + 0.1 * curr_pos
+
+ if self.batch_first:
+ # Convert to batch first
+ output = output.transpose(0, 1)
+ curr_pos = curr_pos.transpose(0, 1)
+ memory = memory.transpose(0, 1)
+ memory_pos = memory_pos.transpose(0, 1)
+
+ for layer in self.layers:
+ kwds = {}
+ if isinstance(layer.cross_attn_image, RoPEAttention):
+ kwds = {"num_k_exclude_rope": num_obj_ptr_tokens}
+
+ output = layer(
+ tgt=output,
+ memory=memory,
+ pos=memory_pos,
+ query_pos=curr_pos,
+ **kwds,
+ )
+ normed_output = self.norm(output)
+
+ if self.batch_first:
+ # Convert back to seq first
+ normed_output = normed_output.transpose(0, 1)
+ curr_pos = curr_pos.transpose(0, 1)
+
+ return normed_output
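An illustrative single-layer sketch of the data flow. The real model plugs `RoPEAttention` into both attention slots; the stub below is an assumption used only to show the `forward(q, k, v)` calling convention the layer relies on and the pre-norm residual pattern.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from sam2.modeling.memory_attention import MemoryAttentionLayer

class StubAttention(nn.Module):
    # Same call convention the layer uses: forward(q, k, v=...)
    def forward(self, q, k, v):
        return F.scaled_dot_product_attention(q, k, v)

layer = MemoryAttentionLayer(
    activation="relu",
    cross_attention=StubAttention(),
    d_model=64,
    dim_feedforward=128,
    dropout=0.0,
    pos_enc_at_attn=False,
    pos_enc_at_cross_attn_keys=True,
    pos_enc_at_cross_attn_queries=False,
    self_attention=StubAttention(),
)

tgt = torch.randn(2, 16, 64)      # current-frame queries
memory = torch.randn(2, 100, 64)  # memory-bank tokens
pos = torch.randn(2, 100, 64)     # positional encoding for the memory
out = layer(tgt=tgt, memory=memory, pos=pos, query_pos=None)
print(out.shape)  # [2, 16, 64]: self-attn, cross-attn to memory, then MLP
```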
diff --git a/projects/PCSegSAM2/sam2/modeling/memory_encoder.py b/projects/PCSegSAM2/sam2/modeling/memory_encoder.py
new file mode 100644
index 00000000..e457c1f3
--- /dev/null
+++ b/projects/PCSegSAM2/sam2/modeling/memory_encoder.py
@@ -0,0 +1,178 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+import math
+from typing import Dict
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from sam2.modeling.sam2_utils import DropPath, LayerNorm2d, get_clones
+
+
+class MaskDownSampler(nn.Module):
+ """
+ Progressively downsample a mask by total_stride, each time by stride.
+ Note that LayerNorm is applied per *token*, like in ViT.
+
+ With each downsample (by a factor stride**2), channel capacity increases by the same factor.
+ In the end, we linearly project to embed_dim channels.
+ """
+
+ def __init__(
+ self,
+ embed_dim=256,
+ kernel_size=4,
+ stride=4,
+ padding=0,
+ total_stride=16,
+ activation=nn.GELU,
+ ):
+ super().__init__()
+ num_layers = int(math.log2(total_stride) // math.log2(stride))
+ assert stride**num_layers == total_stride
+ self.encoder = nn.Sequential()
+ mask_in_chans, mask_out_chans = 1, 1
+ for _ in range(num_layers):
+ mask_out_chans = mask_in_chans * (stride**2)
+ self.encoder.append(
+ nn.Conv2d(
+ mask_in_chans,
+ mask_out_chans,
+ kernel_size=kernel_size,
+ stride=stride,
+ padding=padding,
+ )
+ )
+ self.encoder.append(LayerNorm2d(mask_out_chans))
+ self.encoder.append(activation())
+ mask_in_chans = mask_out_chans
+
+ self.encoder.append(nn.Conv2d(mask_out_chans, embed_dim, kernel_size=1))
+
+ def forward(self, x):
+ return self.encoder(x)
+
+
+# Lightly adapted from ConvNext (https://github.com/facebookresearch/ConvNeXt)
+class CXBlock(nn.Module):
+ r"""ConvNeXt Block. There are two equivalent implementations:
+ (1) DwConv -> LayerNorm (channels_first) -> 1x1 Conv -> GELU -> 1x1 Conv; all in (N, C, H, W)
+ (2) DwConv -> Permute to (N, H, W, C); LayerNorm (channels_last) -> Linear -> GELU -> Linear; Permute back
+ We use (2) as we find it slightly faster in PyTorch
+
+ Args:
+ dim (int): Number of input channels.
+ drop_path (float): Stochastic depth rate. Default: 0.0
+ layer_scale_init_value (float): Init value for Layer Scale. Default: 1e-6.
+ """
+
+ def __init__(
+ self,
+ dim,
+ kernel_size=7,
+ padding=3,
+ drop_path=0.0,
+ layer_scale_init_value=1e-6,
+ use_dwconv=True,
+ ):
+ super().__init__()
+ self.dwconv = nn.Conv2d(
+ dim,
+ dim,
+ kernel_size=kernel_size,
+ padding=padding,
+ groups=dim if use_dwconv else 1,
+ ) # depthwise conv
+ self.norm = LayerNorm2d(dim, eps=1e-6)
+ self.pwconv1 = nn.Linear(dim, 4 * dim) # pointwise/1x1 convs, implemented with linear layers
+ self.act = nn.GELU()
+ self.pwconv2 = nn.Linear(4 * dim, dim)
+ self.gamma = (
+ nn.Parameter(layer_scale_init_value * torch.ones((dim)), requires_grad=True)
+ if layer_scale_init_value > 0
+ else None
+ )
+ self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
+
+ def forward(self, x):
+ input = x
+ x = self.dwconv(x)
+ x = self.norm(x)
+ x = x.permute(0, 2, 3, 1) # (N, C, H, W) -> (N, H, W, C)
+ x = self.pwconv1(x)
+ x = self.act(x)
+ x = self.pwconv2(x)
+ if self.gamma is not None:
+ x = self.gamma * x
+ x = x.permute(0, 3, 1, 2) # (N, H, W, C) -> (N, C, H, W)
+
+ x = input + self.drop_path(x)
+ return x
+
+
+class Fuser(nn.Module):
+ def __init__(self, layer, num_layers, dim=None, input_projection=False):
+ super().__init__()
+ self.proj = nn.Identity()
+ self.layers = get_clones(layer, num_layers)
+
+ if input_projection:
+ assert dim is not None
+ self.proj = nn.Conv2d(dim, dim, kernel_size=1)
+
+ def forward(self, x):
+ # normally x: (N, C, H, W)
+ x = self.proj(x)
+ for layer in self.layers:
+ x = layer(x)
+ return x
+
+
+class MemoryEncoder(nn.Module):
+ def __init__(
+ self,
+ out_dim,
+ mask_downsampler,
+ fuser,
+ position_encoding,
+ in_dim=256, # in_dim of pix_feats
+ ):
+ super().__init__()
+
+ self.mask_downsampler = mask_downsampler
+
+ self.pix_feat_proj = nn.Conv2d(in_dim, in_dim, kernel_size=1)
+ self.fuser = fuser
+ self.position_encoding = position_encoding
+ self.out_proj = nn.Identity()
+ if out_dim != in_dim:
+ self.out_proj = nn.Conv2d(in_dim, out_dim, kernel_size=1)
+
+ def forward(
+ self,
+ pix_feat: torch.Tensor,
+ masks: torch.Tensor,
+ skip_mask_sigmoid: bool = False,
+    ) -> Dict[str, torch.Tensor]:
+ ## Process masks
+ # sigmoid, so that less domain shift from gt masks which are bool
+ if not skip_mask_sigmoid:
+ masks = F.sigmoid(masks)
+ masks = self.mask_downsampler(masks)
+
+ ## Fuse pix_feats and downsampled masks
+ # in case the visual features are on CPU, cast them to CUDA
+ pix_feat = pix_feat.to(masks.device)
+
+ x = self.pix_feat_proj(pix_feat)
+ x = x + masks
+ x = self.fuser(x)
+ x = self.out_proj(x)
+
+ pos = self.position_encoding(x).to(x.dtype)
+
+ return {"vision_features": x, "vision_pos_enc": [pos]}
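A shape sketch for `MaskDownSampler` with its defaults: two stride-4 convolutions (total stride 16) that trade spatial resolution for channel capacity, followed by a 1x1 projection to `embed_dim`.

```python
import torch
from sam2.modeling.memory_encoder import MaskDownSampler

ds = MaskDownSampler(embed_dim=256)  # defaults: stride 4 applied twice
m = torch.randn(1, 1, 1024, 1024)    # mask logits at image resolution
print(ds(m).shape)                   # [1, 256, 64, 64]
```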
diff --git a/projects/PCSegSAM2/sam2/modeling/position_encoding.py b/projects/PCSegSAM2/sam2/modeling/position_encoding.py
new file mode 100644
index 00000000..693657de
--- /dev/null
+++ b/projects/PCSegSAM2/sam2/modeling/position_encoding.py
@@ -0,0 +1,206 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+import math
+from typing import Any, Optional, Tuple
+
+import numpy as np
+import torch
+from torch import nn
+
+
+class PositionEmbeddingSine(nn.Module):
+ """
+ This is a more standard version of the position embedding, very similar to the one
+ used by the Attention Is All You Need paper, generalized to work on images.
+ """
+
+ def __init__(
+ self,
+ num_pos_feats,
+ temperature: int = 10000,
+ normalize: bool = True,
+ scale: Optional[float] = None,
+ ):
+ super().__init__()
+ assert num_pos_feats % 2 == 0, "Expecting even model width"
+ self.num_pos_feats = num_pos_feats // 2
+ self.temperature = temperature
+ self.normalize = normalize
+ if scale is not None and normalize is False:
+ raise ValueError("normalize should be True if scale is passed")
+ if scale is None:
+ scale = 2 * math.pi
+ self.scale = scale
+
+ self.cache = {}
+
+ def _encode_xy(self, x, y):
+ # The positions are expected to be normalized
+ assert len(x) == len(y) and x.ndim == y.ndim == 1
+ x_embed = x * self.scale
+ y_embed = y * self.scale
+
+ dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device)
+ dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats)
+
+ pos_x = x_embed[:, None] / dim_t
+ pos_y = y_embed[:, None] / dim_t
+ pos_x = torch.stack((pos_x[:, 0::2].sin(), pos_x[:, 1::2].cos()), dim=2).flatten(1)
+ pos_y = torch.stack((pos_y[:, 0::2].sin(), pos_y[:, 1::2].cos()), dim=2).flatten(1)
+ return pos_x, pos_y
+
+ @torch.no_grad()
+ def encode_boxes(self, x, y, w, h):
+ pos_x, pos_y = self._encode_xy(x, y)
+ pos = torch.cat((pos_y, pos_x, h[:, None], w[:, None]), dim=1)
+ return pos
+
+ encode = encode_boxes # Backwards compatibility
+
+ @torch.no_grad()
+ def encode_points(self, x, y, labels):
+ (bx, nx), (by, ny), (bl, nl) = x.shape, y.shape, labels.shape
+ assert bx == by and nx == ny and bx == bl and nx == nl
+ pos_x, pos_y = self._encode_xy(x.flatten(), y.flatten())
+ pos_x, pos_y = pos_x.reshape(bx, nx, -1), pos_y.reshape(by, ny, -1)
+ pos = torch.cat((pos_y, pos_x, labels[:, :, None]), dim=2)
+ return pos
+
+ @torch.no_grad()
+ def forward(self, x: torch.Tensor):
+ cache_key = (x.shape[-2], x.shape[-1])
+ if cache_key in self.cache:
+ return self.cache[cache_key][None].repeat(x.shape[0], 1, 1, 1)
+ y_embed = (
+ torch.arange(1, x.shape[-2] + 1, dtype=torch.float32, device=x.device)
+ .view(1, -1, 1)
+ .repeat(x.shape[0], 1, x.shape[-1])
+ )
+ x_embed = (
+ torch.arange(1, x.shape[-1] + 1, dtype=torch.float32, device=x.device)
+ .view(1, 1, -1)
+ .repeat(x.shape[0], x.shape[-2], 1)
+ )
+
+ if self.normalize:
+ eps = 1e-6
+ y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale
+ x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale
+
+ dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device)
+ dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats)
+
+ pos_x = x_embed[:, :, :, None] / dim_t
+ pos_y = y_embed[:, :, :, None] / dim_t
+ pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3)
+ pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3)
+ pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
+ self.cache[cache_key] = pos[0]
+ return pos
+
+
+class PositionEmbeddingRandom(nn.Module):
+ """
+ Positional encoding using random spatial frequencies.
+ """
+
+ def __init__(self, num_pos_feats: int = 64, scale: Optional[float] = None) -> None:
+ super().__init__()
+ if scale is None or scale <= 0.0:
+ scale = 1.0
+ self.register_buffer(
+ "positional_encoding_gaussian_matrix",
+ scale * torch.randn((2, num_pos_feats)),
+ )
+
+ def _pe_encoding(self, coords: torch.Tensor) -> torch.Tensor:
+ """Positionally encode points that are normalized to [0,1]."""
+ # assuming coords are in [0, 1]^2 square and have d_1 x ... x d_n x 2 shape
+ coords = 2 * coords - 1
+ coords = coords @ self.positional_encoding_gaussian_matrix
+ coords = 2 * np.pi * coords
+ # outputs d_1 x ... x d_n x C shape
+ return torch.cat([torch.sin(coords), torch.cos(coords)], dim=-1)
+
+ def forward(self, size: Tuple[int, int]) -> torch.Tensor:
+ """Generate positional encoding for a grid of the specified size."""
+ h, w = size
+ device: Any = self.positional_encoding_gaussian_matrix.device
+ grid = torch.ones((h, w), device=device, dtype=torch.float32)
+ y_embed = grid.cumsum(dim=0) - 0.5
+ x_embed = grid.cumsum(dim=1) - 0.5
+ y_embed = y_embed / h
+ x_embed = x_embed / w
+
+ pe = self._pe_encoding(torch.stack([x_embed, y_embed], dim=-1))
+ return pe.permute(2, 0, 1) # C x H x W
+
+ def forward_with_coords(self, coords_input: torch.Tensor, image_size: Tuple[int, int]) -> torch.Tensor:
+ """Positionally encode points that are not normalized to [0,1]."""
+ coords = coords_input.clone()
+ coords[:, :, 0] = coords[:, :, 0] / image_size[1]
+ coords[:, :, 1] = coords[:, :, 1] / image_size[0]
+ return self._pe_encoding(coords.to(torch.float)) # B x N x C
+
+
+# Rotary Positional Encoding, adapted from:
+# 1. https://github.com/meta-llama/codellama/blob/main/llama/model.py
+# 2. https://github.com/naver-ai/rope-vit
+# 3. https://github.com/lucidrains/rotary-embedding-torch
+
+
+def init_t_xy(end_x: int, end_y: int):
+ t = torch.arange(end_x * end_y, dtype=torch.float32)
+ t_x = (t % end_x).float()
+ t_y = torch.div(t, end_x, rounding_mode="floor").float()
+ return t_x, t_y
+
+
+def compute_axial_cis(dim: int, end_x: int, end_y: int, theta: float = 10000.0):
+ freqs_x = 1.0 / (theta ** (torch.arange(0, dim, 4)[: (dim // 4)].float() / dim))
+ freqs_y = 1.0 / (theta ** (torch.arange(0, dim, 4)[: (dim // 4)].float() / dim))
+
+ t_x, t_y = init_t_xy(end_x, end_y)
+ freqs_x = torch.outer(t_x, freqs_x)
+ freqs_y = torch.outer(t_y, freqs_y)
+ freqs_cis_x = torch.polar(torch.ones_like(freqs_x), freqs_x)
+ freqs_cis_y = torch.polar(torch.ones_like(freqs_y), freqs_y)
+ return torch.cat([freqs_cis_x, freqs_cis_y], dim=-1)
+
+
+def reshape_for_broadcast(freqs_cis: torch.Tensor, x: torch.Tensor):
+ ndim = x.ndim
+ assert 0 <= 1 < ndim
+ assert freqs_cis.shape == (x.shape[-2], x.shape[-1])
+ shape = [d if i >= ndim - 2 else 1 for i, d in enumerate(x.shape)]
+ return freqs_cis.view(*shape)
+
+
+def apply_rotary_enc(
+ xq: torch.Tensor,
+ xk: torch.Tensor,
+ freqs_cis: torch.Tensor,
+ repeat_freqs_k: bool = False,
+):
+ xq_ = torch.view_as_complex(xq.float().reshape(*xq.shape[:-1], -1, 2))
+ xk_ = torch.view_as_complex(xk.float().reshape(*xk.shape[:-1], -1, 2)) if xk.shape[-2] != 0 else None
+ freqs_cis = reshape_for_broadcast(freqs_cis, xq_)
+ xq_out = torch.view_as_real(xq_ * freqs_cis).flatten(3)
+ if xk_ is None:
+ # no keys to rotate, due to dropout
+ return xq_out.type_as(xq).to(xq.device), xk
+ # repeat freqs along seq_len dim to match k seq_len
+ if repeat_freqs_k:
+ r = xk_.shape[-2] // xq_.shape[-2]
+ if freqs_cis.is_cuda:
+ freqs_cis = freqs_cis.repeat(*([1] * (freqs_cis.ndim - 2)), r, 1)
+ else:
+ # torch.repeat on complex numbers may not be supported on non-CUDA devices
+ # (freqs_cis has 4 dims and we repeat on dim 2) so we use expand + flatten
+ freqs_cis = freqs_cis.unsqueeze(2).expand(-1, -1, r, -1, -1).flatten(2, 3)
+ xk_out = torch.view_as_real(xk_ * freqs_cis).flatten(3)
+ return xq_out.type_as(xq).to(xq.device), xk_out.type_as(xk).to(xk.device)
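A small sanity sketch for the two encodings above (dimensions are illustrative): `PositionEmbeddingSine` returns a map with the channel count it was constructed with, and the axial RoPE helpers rotate q/k without changing their shapes.

```python
import torch
from sam2.modeling.position_encoding import (
    PositionEmbeddingSine,
    apply_rotary_enc,
    compute_axial_cis,
)

pe = PositionEmbeddingSine(num_pos_feats=256)
x = torch.randn(2, 256, 32, 32)
print(pe(x).shape)  # [2, 256, 32, 32]; the result is cached per (H, W)

# Axial RoPE over an 8x8 token grid with head dim 64:
freqs = compute_axial_cis(dim=64, end_x=8, end_y=8)  # [64, 32], complex
q = torch.randn(1, 4, 64, 64)  # [B, heads, H*W, head_dim]
k = torch.randn(1, 4, 64, 64)
q_rot, k_rot = apply_rotary_enc(q, k, freqs)
print(q_rot.shape, k_rot.shape)  # shapes unchanged, values rotated
```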
diff --git a/projects/PCSegSAM2/sam2/modeling/sam/__init__.py b/projects/PCSegSAM2/sam2/modeling/sam/__init__.py
new file mode 100644
index 00000000..5277f461
--- /dev/null
+++ b/projects/PCSegSAM2/sam2/modeling/sam/__init__.py
@@ -0,0 +1,5 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
diff --git a/projects/PCSegSAM2/sam2/modeling/sam/mask_decoder.py b/projects/PCSegSAM2/sam2/modeling/sam/mask_decoder.py
new file mode 100644
index 00000000..9776c3dd
--- /dev/null
+++ b/projects/PCSegSAM2/sam2/modeling/sam/mask_decoder.py
@@ -0,0 +1,274 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+from typing import List, Optional, Tuple, Type
+
+import torch
+from sam2.modeling.sam2_utils import MLP, LayerNorm2d
+from torch import nn
+
+
+class MaskDecoder(nn.Module):
+ def __init__(
+ self,
+ *,
+ transformer_dim: int,
+ transformer: nn.Module,
+ num_multimask_outputs: int = 3,
+ activation: Type[nn.Module] = nn.GELU,
+ iou_head_depth: int = 3,
+ iou_head_hidden_dim: int = 256,
+ use_high_res_features: bool = False,
+ iou_prediction_use_sigmoid=False,
+ dynamic_multimask_via_stability=False,
+ dynamic_multimask_stability_delta=0.05,
+ dynamic_multimask_stability_thresh=0.98,
+ pred_obj_scores: bool = False,
+ pred_obj_scores_mlp: bool = False,
+ use_multimask_token_for_obj_ptr: bool = False,
+ ) -> None:
+ """
+ Predicts masks given an image and prompt embeddings, using a
+ transformer architecture.
+
+ Arguments:
+ transformer_dim (int): the channel dimension of the transformer
+ transformer (nn.Module): the transformer used to predict masks
+ num_multimask_outputs (int): the number of masks to predict
+ when disambiguating masks
+ activation (nn.Module): the type of activation to use when
+ upscaling masks
+ iou_head_depth (int): the depth of the MLP used to predict
+ mask quality
+ iou_head_hidden_dim (int): the hidden dimension of the MLP
+ used to predict mask quality
+ """
+ super().__init__()
+ self.transformer_dim = transformer_dim
+ self.transformer = transformer
+
+ self.num_multimask_outputs = num_multimask_outputs
+
+ self.iou_token = nn.Embedding(1, transformer_dim)
+ self.num_mask_tokens = num_multimask_outputs + 1
+ self.mask_tokens = nn.Embedding(self.num_mask_tokens, transformer_dim)
+
+ self.pred_obj_scores = pred_obj_scores
+ if self.pred_obj_scores:
+ self.obj_score_token = nn.Embedding(1, transformer_dim)
+ self.use_multimask_token_for_obj_ptr = use_multimask_token_for_obj_ptr
+
+ self.output_upscaling = nn.Sequential(
+ nn.ConvTranspose2d(transformer_dim, transformer_dim // 4, kernel_size=2, stride=2),
+ LayerNorm2d(transformer_dim // 4),
+ activation(),
+ nn.ConvTranspose2d(transformer_dim // 4, transformer_dim // 8, kernel_size=2, stride=2),
+ activation(),
+ )
+ self.use_high_res_features = use_high_res_features
+ if use_high_res_features:
+ self.conv_s0 = nn.Conv2d(transformer_dim, transformer_dim // 8, kernel_size=1, stride=1)
+ self.conv_s1 = nn.Conv2d(transformer_dim, transformer_dim // 4, kernel_size=1, stride=1)
+
+ self.output_hypernetworks_mlps = nn.ModuleList(
+ [MLP(transformer_dim, transformer_dim, transformer_dim // 8, 3) for i in range(self.num_mask_tokens)]
+ )
+
+ self.iou_prediction_head = MLP(
+ transformer_dim,
+ iou_head_hidden_dim,
+ self.num_mask_tokens,
+ iou_head_depth,
+ sigmoid_output=iou_prediction_use_sigmoid,
+ )
+ if self.pred_obj_scores:
+ self.pred_obj_score_head = nn.Linear(transformer_dim, 1)
+ if pred_obj_scores_mlp:
+ self.pred_obj_score_head = MLP(transformer_dim, transformer_dim, 1, 3)
+
+ # When outputting a single mask, optionally we can dynamically fall back to the best
+ # multimask output token if the single mask output token gives low stability scores.
+ self.dynamic_multimask_via_stability = dynamic_multimask_via_stability
+ self.dynamic_multimask_stability_delta = dynamic_multimask_stability_delta
+ self.dynamic_multimask_stability_thresh = dynamic_multimask_stability_thresh
+
+ def forward(
+ self,
+ image_embeddings: torch.Tensor,
+ image_pe: torch.Tensor,
+ sparse_prompt_embeddings: torch.Tensor,
+ dense_prompt_embeddings: torch.Tensor,
+ multimask_output: bool,
+ repeat_image: bool,
+ high_res_features: Optional[List[torch.Tensor]] = None,
+    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
+ """
+ Predict masks given image and prompt embeddings.
+
+ Arguments:
+ image_embeddings (torch.Tensor): the embeddings from the image encoder
+ image_pe (torch.Tensor): positional encoding with the shape of image_embeddings
+ sparse_prompt_embeddings (torch.Tensor): the embeddings of the points and boxes
+ dense_prompt_embeddings (torch.Tensor): the embeddings of the mask inputs
+ multimask_output (bool): Whether to return multiple masks or a single
+ mask.
+
+        Returns:
+          torch.Tensor: batched predicted masks
+          torch.Tensor: batched predictions of mask quality
+          torch.Tensor: batched SAM token for mask output
+          torch.Tensor: batched object score logits
+ """
+ masks, iou_pred, mask_tokens_out, object_score_logits = self.predict_masks(
+ image_embeddings=image_embeddings,
+ image_pe=image_pe,
+ sparse_prompt_embeddings=sparse_prompt_embeddings,
+ dense_prompt_embeddings=dense_prompt_embeddings,
+ repeat_image=repeat_image,
+ high_res_features=high_res_features,
+ )
+
+ # Select the correct mask or masks for output
+ if multimask_output:
+ masks = masks[:, 1:, :, :]
+ iou_pred = iou_pred[:, 1:]
+ elif self.dynamic_multimask_via_stability and not self.training:
+ masks, iou_pred = self._dynamic_multimask_via_stability(masks, iou_pred)
+ else:
+ masks = masks[:, 0:1, :, :]
+ iou_pred = iou_pred[:, 0:1]
+
+ if multimask_output and self.use_multimask_token_for_obj_ptr:
+ sam_tokens_out = mask_tokens_out[:, 1:] # [b, 3, c] shape
+ else:
+ # Take the mask output token. Here we *always* use the token for single mask output.
+ # At test time, even if we track after 1-click (and using multimask_output=True),
+ # we still take the single mask token here. The rationale is that we always track
+ # after multiple clicks during training, so the past tokens seen during training
+ # are always the single mask token (and we'll let it be the object-memory token).
+ sam_tokens_out = mask_tokens_out[:, 0:1] # [b, 1, c] shape
+
+ # Prepare output
+ return masks, iou_pred, sam_tokens_out, object_score_logits
+
+ def predict_masks(
+ self,
+ image_embeddings: torch.Tensor,
+ image_pe: torch.Tensor,
+ sparse_prompt_embeddings: torch.Tensor,
+ dense_prompt_embeddings: torch.Tensor,
+ repeat_image: bool,
+ high_res_features: Optional[List[torch.Tensor]] = None,
+    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
+ """Predicts masks. See 'forward' for more details."""
+ # Concatenate output tokens
+ s = 0
+ if self.pred_obj_scores:
+ output_tokens = torch.cat(
+ [
+ self.obj_score_token.weight,
+ self.iou_token.weight,
+ self.mask_tokens.weight,
+ ],
+ dim=0,
+ )
+ s = 1
+ else:
+ output_tokens = torch.cat([self.iou_token.weight, self.mask_tokens.weight], dim=0)
+ output_tokens = output_tokens.unsqueeze(0).expand(sparse_prompt_embeddings.size(0), -1, -1)
+ tokens = torch.cat((output_tokens, sparse_prompt_embeddings), dim=1)
+
+ # Expand per-image data in batch direction to be per-mask
+ if repeat_image:
+ src = torch.repeat_interleave(image_embeddings, tokens.shape[0], dim=0)
+ else:
+ assert image_embeddings.shape[0] == tokens.shape[0]
+ src = image_embeddings
+ src = src + dense_prompt_embeddings
+ assert image_pe.size(0) == 1, "image_pe should have size 1 in batch dim (from `get_dense_pe()`)"
+ pos_src = torch.repeat_interleave(image_pe, tokens.shape[0], dim=0)
+ b, c, h, w = src.shape
+
+ # Run the transformer
+ hs, src = self.transformer(src, pos_src, tokens)
+ iou_token_out = hs[:, s, :]
+ mask_tokens_out = hs[:, s + 1 : (s + 1 + self.num_mask_tokens), :]
+
+ # Upscale mask embeddings and predict masks using the mask tokens
+ src = src.transpose(1, 2).view(b, c, h, w)
+ if not self.use_high_res_features:
+ upscaled_embedding = self.output_upscaling(src)
+ else:
+ dc1, ln1, act1, dc2, act2 = self.output_upscaling
+ feat_s0, feat_s1 = high_res_features
+ upscaled_embedding = act1(ln1(dc1(src) + feat_s1))
+ upscaled_embedding = act2(dc2(upscaled_embedding) + feat_s0)
+
+ hyper_in_list: List[torch.Tensor] = []
+ for i in range(self.num_mask_tokens):
+ hyper_in_list.append(self.output_hypernetworks_mlps[i](mask_tokens_out[:, i, :]))
+ hyper_in = torch.stack(hyper_in_list, dim=1)
+ b, c, h, w = upscaled_embedding.shape
+ masks = (hyper_in @ upscaled_embedding.view(b, c, h * w)).view(b, -1, h, w)
+
+ # Generate mask quality predictions
+ iou_pred = self.iou_prediction_head(iou_token_out)
+ if self.pred_obj_scores:
+ assert s == 1
+ object_score_logits = self.pred_obj_score_head(hs[:, 0, :])
+ else:
+ # Obj scores logits - default to 10.0, i.e. assuming the object is present, sigmoid(10)=1
+ object_score_logits = 10.0 * iou_pred.new_ones(iou_pred.shape[0], 1)
+
+ return masks, iou_pred, mask_tokens_out, object_score_logits
+
+ def _get_stability_scores(self, mask_logits):
+ """
+ Compute stability scores of the mask logits based on the IoU between upper and
+ lower thresholds.
+ """
+ mask_logits = mask_logits.flatten(-2)
+ stability_delta = self.dynamic_multimask_stability_delta
+ area_i = torch.sum(mask_logits > stability_delta, dim=-1).float()
+ area_u = torch.sum(mask_logits > -stability_delta, dim=-1).float()
+ stability_scores = torch.where(area_u > 0, area_i / area_u, 1.0)
+ return stability_scores
+
+ def _dynamic_multimask_via_stability(self, all_mask_logits, all_iou_scores):
+ """
+ When outputting a single mask, if the stability score from the current single-mask
+ output (based on output token 0) falls below a threshold, we instead select from
+ multi-mask outputs (based on output token 1~3) the mask with the highest predicted
+ IoU score. This is intended to ensure a valid mask for both clicking and tracking.
+ """
+ # The best mask from multimask output tokens (1~3)
+ multimask_logits = all_mask_logits[:, 1:, :, :]
+ multimask_iou_scores = all_iou_scores[:, 1:]
+ best_scores_inds = torch.argmax(multimask_iou_scores, dim=-1)
+ batch_inds = torch.arange(multimask_iou_scores.size(0), device=all_iou_scores.device)
+ best_multimask_logits = multimask_logits[batch_inds, best_scores_inds]
+ best_multimask_logits = best_multimask_logits.unsqueeze(1)
+ best_multimask_iou_scores = multimask_iou_scores[batch_inds, best_scores_inds]
+ best_multimask_iou_scores = best_multimask_iou_scores.unsqueeze(1)
+
+ # The mask from singlemask output token 0 and its stability score
+ singlemask_logits = all_mask_logits[:, 0:1, :, :]
+ singlemask_iou_scores = all_iou_scores[:, 0:1]
+ stability_scores = self._get_stability_scores(singlemask_logits)
+ is_stable = stability_scores >= self.dynamic_multimask_stability_thresh
+
+ # Dynamically fall back to best multimask output upon low stability scores.
+ mask_logits_out = torch.where(
+ is_stable[..., None, None].expand_as(singlemask_logits),
+ singlemask_logits,
+ best_multimask_logits,
+ )
+ iou_scores_out = torch.where(
+ is_stable.expand_as(singlemask_iou_scores),
+ singlemask_iou_scores,
+ best_multimask_iou_scores,
+ )
+ return mask_logits_out, iou_scores_out
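The stability score used above is the IoU between the mask binarized at `+delta` and at `-delta`; a confident mask barely changes between the two thresholds. A worked example of the same arithmetic:

```python
import torch

delta = 0.05
logits = torch.tensor([[3.0, 2.0, 0.03, -0.02, -4.0]])  # flattened mask logits
area_i = (logits > delta).sum(-1).float()   # 2 pixels survive +delta
area_u = (logits > -delta).sum(-1).float()  # 4 pixels survive -delta
stability = torch.where(area_u > 0, area_i / area_u, torch.ones(()))
print(stability)  # tensor([0.5000]) < 0.98 -> would fall back to multimask
```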
diff --git a/projects/PCSegSAM2/sam2/modeling/sam/prompt_encoder.py b/projects/PCSegSAM2/sam2/modeling/sam/prompt_encoder.py
new file mode 100644
index 00000000..c5c6e9a7
--- /dev/null
+++ b/projects/PCSegSAM2/sam2/modeling/sam/prompt_encoder.py
@@ -0,0 +1,172 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+from typing import Optional, Tuple, Type
+
+import torch
+from sam2.modeling.position_encoding import PositionEmbeddingRandom
+from sam2.modeling.sam2_utils import LayerNorm2d
+from torch import nn
+
+
+class PromptEncoder(nn.Module):
+ def __init__(
+ self,
+ embed_dim: int,
+ image_embedding_size: Tuple[int, int],
+ input_image_size: Tuple[int, int],
+ mask_in_chans: int,
+ activation: Type[nn.Module] = nn.GELU,
+ ) -> None:
+ """
+ Encodes prompts for input to SAM's mask decoder.
+
+ Arguments:
+ embed_dim (int): The prompts' embedding dimension
+ image_embedding_size (tuple(int, int)): The spatial size of the
+ image embedding, as (H, W).
+ input_image_size (int): The padded size of the image as input
+ to the image encoder, as (H, W).
+ mask_in_chans (int): The number of hidden channels used for
+ encoding input masks.
+ activation (nn.Module): The activation to use when encoding
+ input masks.
+ """
+ super().__init__()
+ self.embed_dim = embed_dim
+ self.input_image_size = input_image_size
+ self.image_embedding_size = image_embedding_size
+ self.pe_layer = PositionEmbeddingRandom(embed_dim // 2)
+
+ self.num_point_embeddings: int = 4 # pos/neg point + 2 box corners
+ point_embeddings = [nn.Embedding(1, embed_dim) for i in range(self.num_point_embeddings)]
+ self.point_embeddings = nn.ModuleList(point_embeddings)
+ self.not_a_point_embed = nn.Embedding(1, embed_dim)
+
+ self.mask_input_size = (
+ 4 * image_embedding_size[0],
+ 4 * image_embedding_size[1],
+ )
+ self.mask_downscaling = nn.Sequential(
+ nn.Conv2d(1, mask_in_chans // 4, kernel_size=2, stride=2),
+ LayerNorm2d(mask_in_chans // 4),
+ activation(),
+ nn.Conv2d(mask_in_chans // 4, mask_in_chans, kernel_size=2, stride=2),
+ LayerNorm2d(mask_in_chans),
+ activation(),
+ nn.Conv2d(mask_in_chans, embed_dim, kernel_size=1),
+ )
+ self.no_mask_embed = nn.Embedding(1, embed_dim)
+
+ def get_dense_pe(self) -> torch.Tensor:
+ """
+ Returns the positional encoding used to encode point prompts,
+ applied to a dense set of points the shape of the image encoding.
+
+ Returns:
+ torch.Tensor: Positional encoding with shape
+ 1x(embed_dim)x(embedding_h)x(embedding_w)
+ """
+ return self.pe_layer(self.image_embedding_size).unsqueeze(0)
+
+ def _embed_points(
+ self,
+ points: torch.Tensor,
+ labels: torch.Tensor,
+ pad: bool,
+ ) -> torch.Tensor:
+ """Embeds point prompts."""
+ points = points + 0.5 # Shift to center of pixel
+ if pad:
+ padding_point = torch.zeros((points.shape[0], 1, 2), device=points.device)
+ padding_label = -torch.ones((labels.shape[0], 1), device=labels.device)
+ points = torch.cat([points, padding_point], dim=1)
+ labels = torch.cat([labels, padding_label], dim=1)
+ point_embedding = self.pe_layer.forward_with_coords(points, self.input_image_size)
+ point_embedding[labels == -1] = 0.0
+ point_embedding[labels == -1] += self.not_a_point_embed.weight
+ point_embedding[labels == 0] += self.point_embeddings[0].weight
+ point_embedding[labels == 1] += self.point_embeddings[1].weight
+ point_embedding[labels == 2] += self.point_embeddings[2].weight
+ point_embedding[labels == 3] += self.point_embeddings[3].weight
+ return point_embedding
+
+ def _embed_boxes(self, boxes: torch.Tensor) -> torch.Tensor:
+ """Embeds box prompts."""
+ boxes = boxes + 0.5 # Shift to center of pixel
+ coords = boxes.reshape(-1, 2, 2)
+ corner_embedding = self.pe_layer.forward_with_coords(coords, self.input_image_size)
+ corner_embedding[:, 0, :] += self.point_embeddings[2].weight
+ corner_embedding[:, 1, :] += self.point_embeddings[3].weight
+ return corner_embedding
+
+ def _embed_masks(self, masks: torch.Tensor) -> torch.Tensor:
+ """Embeds mask inputs."""
+ mask_embedding = self.mask_downscaling(masks)
+ return mask_embedding
+
+ def _get_batch_size(
+ self,
+ points: Optional[Tuple[torch.Tensor, torch.Tensor]],
+ boxes: Optional[torch.Tensor],
+ masks: Optional[torch.Tensor],
+ ) -> int:
+ """
+ Gets the batch size of the output given the batch size of the input prompts.
+ """
+ if points is not None:
+ return points[0].shape[0]
+ elif boxes is not None:
+ return boxes.shape[0]
+ elif masks is not None:
+ return masks.shape[0]
+ else:
+ return 1
+
+ def _get_device(self) -> torch.device:
+ return self.point_embeddings[0].weight.device
+
+ def forward(
+ self,
+ points: Optional[Tuple[torch.Tensor, torch.Tensor]],
+ boxes: Optional[torch.Tensor],
+ masks: Optional[torch.Tensor],
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
+ """
+ Embeds different types of prompts, returning both sparse and dense
+ embeddings.
+
+ Arguments:
+ points (tuple(torch.Tensor, torch.Tensor) or none): point coordinates
+ and labels to embed.
+ boxes (torch.Tensor or none): boxes to embed
+ masks (torch.Tensor or none): masks to embed
+
+ Returns:
+ torch.Tensor: sparse embeddings for the points and boxes, with shape
+ BxNx(embed_dim), where N is determined by the number of input points
+ and boxes.
+ torch.Tensor: dense embeddings for the masks, in the shape
+ Bx(embed_dim)x(embed_H)x(embed_W)
+ """
+ bs = self._get_batch_size(points, boxes, masks)
+ sparse_embeddings = torch.empty((bs, 0, self.embed_dim), device=self._get_device())
+ if points is not None:
+ coords, labels = points
+ point_embeddings = self._embed_points(coords, labels, pad=(boxes is None))
+ sparse_embeddings = torch.cat([sparse_embeddings, point_embeddings], dim=1)
+ if boxes is not None:
+ box_embeddings = self._embed_boxes(boxes)
+ sparse_embeddings = torch.cat([sparse_embeddings, box_embeddings], dim=1)
+
+ if masks is not None:
+ dense_embeddings = self._embed_masks(masks)
+ else:
+ dense_embeddings = self.no_mask_embed.weight.reshape(1, -1, 1, 1).expand(
+ bs, -1, self.image_embedding_size[0], self.image_embedding_size[1]
+ )
+
+ return sparse_embeddings, dense_embeddings
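A sketch of encoding one positive click with the prompt encoder above; the sizes assume the usual SAM-style 1024-px input with a stride-16 image embedding (an assumption for illustration):

```python
import torch
from sam2.modeling.sam.prompt_encoder import PromptEncoder

enc = PromptEncoder(
    embed_dim=256,
    image_embedding_size=(64, 64),
    input_image_size=(1024, 1024),
    mask_in_chans=16,
)
points = torch.tensor([[[512.0, 512.0]]])  # [B=1, N=1, 2] in (x, y)
labels = torch.tensor([[1]])               # 1 = foreground click
sparse, dense = enc(points=(points, labels), boxes=None, masks=None)
print(sparse.shape)  # [1, 2, 256]: the click plus a padding point
print(dense.shape)   # [1, 256, 64, 64]: no_mask_embed broadcast over the grid
```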
diff --git a/projects/PCSegSAM2/sam2/modeling/sam/transformer.py b/projects/PCSegSAM2/sam2/modeling/sam/transformer.py
new file mode 100644
index 00000000..108f81c2
--- /dev/null
+++ b/projects/PCSegSAM2/sam2/modeling/sam/transformer.py
@@ -0,0 +1,343 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+import contextlib
+import math
+import warnings
+from functools import partial
+from typing import Tuple, Type
+
+import torch
+import torch.nn.functional as F
+from sam2.modeling.position_encoding import apply_rotary_enc, compute_axial_cis
+from sam2.modeling.sam2_utils import MLP
+from sam2.utils.misc import get_sdpa_settings
+from torch import Tensor, nn
+
+warnings.simplefilter(action="ignore", category=FutureWarning)
+# Check whether Flash Attention is available (and use it by default)
+OLD_GPU, USE_FLASH_ATTN, MATH_KERNEL_ON = get_sdpa_settings()
+# A fallback setting to allow all available kernels if Flash Attention fails
+ALLOW_ALL_KERNELS = False
+
+
+def sdp_kernel_context(dropout_p):
+ """
+ Get the context for the attention scaled dot-product kernel. We use Flash Attention
+ by default, but fall back to all available kernels if Flash Attention fails.
+ """
+ if ALLOW_ALL_KERNELS:
+ return contextlib.nullcontext()
+
+ return torch.backends.cuda.sdp_kernel(
+ enable_flash=USE_FLASH_ATTN,
+ # if Flash attention kernel is off, then math kernel needs to be enabled
+ enable_math=(OLD_GPU and dropout_p > 0.0) or MATH_KERNEL_ON,
+ enable_mem_efficient=OLD_GPU,
+ )
+
+
+class TwoWayTransformer(nn.Module):
+ def __init__(
+ self,
+ depth: int,
+ embedding_dim: int,
+ num_heads: int,
+ mlp_dim: int,
+ activation: Type[nn.Module] = nn.ReLU,
+ attention_downsample_rate: int = 2,
+ ) -> None:
+ """
+ A transformer decoder that attends to an input image using
+ queries whose positional embedding is supplied.
+
+ Args:
+ depth (int): number of layers in the transformer
+ embedding_dim (int): the channel dimension for the input embeddings
+ num_heads (int): the number of heads for multihead attention. Must
+ divide embedding_dim
+ mlp_dim (int): the channel dimension internal to the MLP block
+ activation (nn.Module): the activation to use in the MLP block
+ """
+ super().__init__()
+ self.depth = depth
+ self.embedding_dim = embedding_dim
+ self.num_heads = num_heads
+ self.mlp_dim = mlp_dim
+ self.layers = nn.ModuleList()
+
+ for i in range(depth):
+ self.layers.append(
+ TwoWayAttentionBlock(
+ embedding_dim=embedding_dim,
+ num_heads=num_heads,
+ mlp_dim=mlp_dim,
+ activation=activation,
+ attention_downsample_rate=attention_downsample_rate,
+ skip_first_layer_pe=(i == 0),
+ )
+ )
+
+ self.final_attn_token_to_image = Attention(embedding_dim, num_heads, downsample_rate=attention_downsample_rate)
+ self.norm_final_attn = nn.LayerNorm(embedding_dim)
+
+ def forward(
+ self,
+ image_embedding: Tensor,
+ image_pe: Tensor,
+ point_embedding: Tensor,
+ ) -> Tuple[Tensor, Tensor]:
+ """
+ Args:
+ image_embedding (torch.Tensor): image to attend to. Should be shape
+ B x embedding_dim x h x w for any h and w.
+ image_pe (torch.Tensor): the positional encoding to add to the image. Must
+ have the same shape as image_embedding.
+ point_embedding (torch.Tensor): the embedding to add to the query points.
+ Must have shape B x N_points x embedding_dim for any N_points.
+
+ Returns:
+ torch.Tensor: the processed point_embedding
+ torch.Tensor: the processed image_embedding
+ """
+ # BxCxHxW -> BxHWxC == B x N_image_tokens x C
+ bs, c, h, w = image_embedding.shape
+ image_embedding = image_embedding.flatten(2).permute(0, 2, 1)
+ image_pe = image_pe.flatten(2).permute(0, 2, 1)
+
+ # Prepare queries
+ queries = point_embedding
+ keys = image_embedding
+
+ # Apply transformer blocks and final layernorm
+ for layer in self.layers:
+ queries, keys = layer(
+ queries=queries,
+ keys=keys,
+ query_pe=point_embedding,
+ key_pe=image_pe,
+ )
+
+ # Apply the final attention layer from the points to the image
+ q = queries + point_embedding
+ k = keys + image_pe
+ attn_out = self.final_attn_token_to_image(q=q, k=k, v=keys)
+ queries = queries + attn_out
+ queries = self.norm_final_attn(queries)
+
+ return queries, keys
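+
+# Shape sketch (illustrative; the sizes below are assumptions for a typical SAM-style
+# configuration, not values fixed by this class):
+#
+#   transformer = TwoWayTransformer(depth=2, embedding_dim=256, num_heads=8, mlp_dim=2048)
+#   image_embedding: [B, 256, 64, 64]; image_pe: [B, 256, 64, 64];
+#   point_embedding: [B, N_points, 256]
+#   queries, keys = transformer(image_embedding, image_pe, point_embedding)
+#   # queries: [B, N_points, 256]; keys: [B, 64*64, 256]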
+
+
+class TwoWayAttentionBlock(nn.Module):
+ def __init__(
+ self,
+ embedding_dim: int,
+ num_heads: int,
+ mlp_dim: int = 2048,
+ activation: Type[nn.Module] = nn.ReLU,
+ attention_downsample_rate: int = 2,
+ skip_first_layer_pe: bool = False,
+ ) -> None:
+ """
+ A transformer block with four layers: (1) self-attention of sparse
+ inputs, (2) cross attention of sparse inputs to dense inputs, (3) mlp
+ block on sparse inputs, and (4) cross attention of dense inputs to sparse
+ inputs.
+
+ Arguments:
+ embedding_dim (int): the channel dimension of the embeddings
+ num_heads (int): the number of heads in the attention layers
+ mlp_dim (int): the hidden dimension of the mlp block
+ activation (nn.Module): the activation of the mlp block
+ skip_first_layer_pe (bool): skip the PE on the first layer
+ """
+ super().__init__()
+ self.self_attn = Attention(embedding_dim, num_heads)
+ self.norm1 = nn.LayerNorm(embedding_dim)
+
+ self.cross_attn_token_to_image = Attention(embedding_dim, num_heads, downsample_rate=attention_downsample_rate)
+ self.norm2 = nn.LayerNorm(embedding_dim)
+
+ self.mlp = MLP(embedding_dim, mlp_dim, embedding_dim, num_layers=2, activation=activation)
+ self.norm3 = nn.LayerNorm(embedding_dim)
+
+ self.norm4 = nn.LayerNorm(embedding_dim)
+ self.cross_attn_image_to_token = Attention(embedding_dim, num_heads, downsample_rate=attention_downsample_rate)
+
+ self.skip_first_layer_pe = skip_first_layer_pe
+
+ def forward(self, queries: Tensor, keys: Tensor, query_pe: Tensor, key_pe: Tensor) -> Tuple[Tensor, Tensor]:
+ # Self attention block
+ if self.skip_first_layer_pe:
+ queries = self.self_attn(q=queries, k=queries, v=queries)
+ else:
+ q = queries + query_pe
+ attn_out = self.self_attn(q=q, k=q, v=queries)
+ queries = queries + attn_out
+ queries = self.norm1(queries)
+
+ # Cross attention block, tokens attending to image embedding
+ q = queries + query_pe
+ k = keys + key_pe
+ attn_out = self.cross_attn_token_to_image(q=q, k=k, v=keys)
+ queries = queries + attn_out
+ queries = self.norm2(queries)
+
+ # MLP block
+ mlp_out = self.mlp(queries)
+ queries = queries + mlp_out
+ queries = self.norm3(queries)
+
+ # Cross attention block, image embedding attending to tokens
+ q = queries + query_pe
+ k = keys + key_pe
+ attn_out = self.cross_attn_image_to_token(q=k, k=q, v=queries)
+ keys = keys + attn_out
+ keys = self.norm4(keys)
+
+ return queries, keys
+
+
+class Attention(nn.Module):
+ """
+ An attention layer that allows for downscaling the size of the embedding
+ after projection to queries, keys, and values.
+ """
+
+ def __init__(
+ self,
+ embedding_dim: int,
+ num_heads: int,
+ downsample_rate: int = 1,
+ dropout: float = 0.0,
+ kv_in_dim: int = None,
+ ) -> None:
+ super().__init__()
+ self.embedding_dim = embedding_dim
+ self.kv_in_dim = kv_in_dim if kv_in_dim is not None else embedding_dim
+ self.internal_dim = embedding_dim // downsample_rate
+ self.num_heads = num_heads
+        assert self.internal_dim % num_heads == 0, "num_heads must divide embedding_dim // downsample_rate."
+
+ self.q_proj = nn.Linear(embedding_dim, self.internal_dim)
+ self.k_proj = nn.Linear(self.kv_in_dim, self.internal_dim)
+ self.v_proj = nn.Linear(self.kv_in_dim, self.internal_dim)
+ self.out_proj = nn.Linear(self.internal_dim, embedding_dim)
+
+ self.dropout_p = dropout
+
+ def _separate_heads(self, x: Tensor, num_heads: int) -> Tensor:
+ b, n, c = x.shape
+ x = x.reshape(b, n, num_heads, c // num_heads)
+ return x.transpose(1, 2) # B x N_heads x N_tokens x C_per_head
+
+ def _recombine_heads(self, x: Tensor) -> Tensor:
+ b, n_heads, n_tokens, c_per_head = x.shape
+ x = x.transpose(1, 2)
+ return x.reshape(b, n_tokens, n_heads * c_per_head) # B x N_tokens x C
+
+ def forward(self, q: Tensor, k: Tensor, v: Tensor) -> Tensor:
+ # Input projections
+ q = self.q_proj(q)
+ k = self.k_proj(k)
+ v = self.v_proj(v)
+
+ # Separate into heads
+ q = self._separate_heads(q, self.num_heads)
+ k = self._separate_heads(k, self.num_heads)
+ v = self._separate_heads(v, self.num_heads)
+
+ dropout_p = self.dropout_p if self.training else 0.0
+ # Attention
+ try:
+ with sdp_kernel_context(dropout_p):
+ out = F.scaled_dot_product_attention(q, k, v, dropout_p=dropout_p)
+ except Exception as e:
+ # Fall back to all kernels if the Flash attention kernel fails
+ warnings.warn(
+ f"Flash Attention kernel failed due to: {e}\nFalling back to all available "
+ f"kernels for scaled_dot_product_attention (which may have a slower speed).",
+ category=UserWarning,
+ stacklevel=2,
+ )
+ global ALLOW_ALL_KERNELS
+ ALLOW_ALL_KERNELS = True
+ out = F.scaled_dot_product_attention(q, k, v, dropout_p=dropout_p)
+
+ out = self._recombine_heads(out)
+ out = self.out_proj(out)
+
+ return out
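+
+# Example (assumed values): with embedding_dim=256 and downsample_rate=2, the q/k/v
+# projections map 256 -> 128, attention runs in the 128-dim internal space split
+# across num_heads heads, and out_proj maps 128 -> 256. `kv_in_dim` only changes the
+# expected channel size of incoming keys/values, not the internal dim.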
+
+
+class RoPEAttention(Attention):
+ """Attention with rotary position encoding."""
+
+ def __init__(
+ self,
+ *args,
+ rope_theta=10000.0,
+ # whether to repeat q rope to match k length
+ # this is needed for cross-attention to memories
+ rope_k_repeat=False,
+ feat_sizes=(32, 32), # [w, h] for stride 16 feats at 512 resolution
+ **kwargs,
+ ):
+ super().__init__(*args, **kwargs)
+
+ self.compute_cis = partial(compute_axial_cis, dim=self.internal_dim // self.num_heads, theta=rope_theta)
+ freqs_cis = self.compute_cis(end_x=feat_sizes[0], end_y=feat_sizes[1])
+ self.freqs_cis = freqs_cis
+ self.rope_k_repeat = rope_k_repeat
+
+ def forward(self, q: Tensor, k: Tensor, v: Tensor, num_k_exclude_rope: int = 0) -> Tensor:
+ # Input projections
+ q = self.q_proj(q)
+ k = self.k_proj(k)
+ v = self.v_proj(v)
+
+ # Separate into heads
+ q = self._separate_heads(q, self.num_heads)
+ k = self._separate_heads(k, self.num_heads)
+ v = self._separate_heads(v, self.num_heads)
+
+ # Apply rotary position encoding
+ w = h = math.sqrt(q.shape[-2])
+ self.freqs_cis = self.freqs_cis.to(q.device)
+ if self.freqs_cis.shape[0] != q.shape[-2]:
+ self.freqs_cis = self.compute_cis(end_x=w, end_y=h).to(q.device)
+ if q.shape[-2] != k.shape[-2]:
+ assert self.rope_k_repeat
+
+ num_k_rope = k.size(-2) - num_k_exclude_rope
+ q, k[:, :, :num_k_rope] = apply_rotary_enc(
+ q,
+ k[:, :, :num_k_rope],
+ freqs_cis=self.freqs_cis,
+ repeat_freqs_k=self.rope_k_repeat,
+ )
+
+ dropout_p = self.dropout_p if self.training else 0.0
+ # Attention
+ try:
+ with sdp_kernel_context(dropout_p):
+ out = F.scaled_dot_product_attention(q, k, v, dropout_p=dropout_p)
+ except Exception as e:
+ # Fall back to all kernels if the Flash attention kernel fails
+ warnings.warn(
+ f"Flash Attention kernel failed due to: {e}\nFalling back to all available "
+ f"kernels for scaled_dot_product_attention (which may have a slower speed).",
+ category=UserWarning,
+ stacklevel=2,
+ )
+ global ALLOW_ALL_KERNELS
+ ALLOW_ALL_KERNELS = True
+ out = F.scaled_dot_product_attention(q, k, v, dropout_p=dropout_p)
+
+ out = self._recombine_heads(out)
+ out = self.out_proj(out)
+
+ return out
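+
+# Note on the RoPE mechanics above (a sketch, not upstream documentation): queries and
+# keys are rotated with axial frequencies computed for a feat_sizes[0] x feat_sizes[1]
+# grid; `freqs_cis` is recomputed on the fly when the spatial token count changes
+# (e.g. at a different resolution), and the last `num_k_exclude_rope` keys (in SAM 2's
+# memory attention, the object pointer tokens appended after the spatial memory) are
+# left un-rotated.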
diff --git a/projects/PCSegSAM2/sam2/modeling/sam2_base.py b/projects/PCSegSAM2/sam2/modeling/sam2_base.py
new file mode 100644
index 00000000..ee053105
--- /dev/null
+++ b/projects/PCSegSAM2/sam2/modeling/sam2_base.py
@@ -0,0 +1,877 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+import torch
+import torch.distributed
+import torch.nn.functional as F
+from sam2.modeling.sam2_utils import MLP, get_1d_sine_pe, select_closest_cond_frames
+from sam2.modeling.sam.mask_decoder import MaskDecoder
+from sam2.modeling.sam.prompt_encoder import PromptEncoder
+from sam2.modeling.sam.transformer import TwoWayTransformer
+from torch.nn.init import trunc_normal_
+
+# a large negative value as a placeholder score for missing objects
+NO_OBJ_SCORE = -1024.0
+
+
+class SAM2Base(torch.nn.Module):
+ def __init__(
+ self,
+ image_encoder,
+ memory_attention,
+ memory_encoder,
+ num_maskmem=7, # default 1 input frame + 6 previous frames
+ image_size=512,
+ backbone_stride=16, # stride of the image backbone output
+ sigmoid_scale_for_mem_enc=1.0, # scale factor for mask sigmoid prob
+ sigmoid_bias_for_mem_enc=0.0, # bias factor for mask sigmoid prob
+ # During evaluation, whether to binarize the sigmoid mask logits on interacted frames with clicks
+ binarize_mask_from_pts_for_mem_enc=False,
+ use_mask_input_as_output_without_sam=False, # on frames with mask input, whether to directly output the input mask without using a SAM prompt encoder + mask decoder
+ # The maximum number of conditioning frames to participate in the memory attention (-1 means no limit; if there are more conditioning frames than this limit,
+ # we only cross-attend to the temporally closest `max_cond_frames_in_attn` conditioning frames in the encoder when tracking each frame). This gives the model
+ # a temporal locality when handling a large number of annotated frames (since closer frames should be more important) and also avoids GPU OOM.
+ max_cond_frames_in_attn=-1,
+ # on the first frame, whether to directly add the no-memory embedding to the image feature
+ # (instead of using the transformer encoder)
+ directly_add_no_mem_embed=False,
+ # whether to use high-resolution feature maps in the SAM mask decoder
+ use_high_res_features_in_sam=False,
+ # whether to output multiple (3) masks for the first click on initial conditioning frames
+ multimask_output_in_sam=False,
+ # the minimum and maximum number of clicks to use multimask_output_in_sam (only relevant when `multimask_output_in_sam=True`;
+ # default is 1 for both, meaning that only the first click gives multimask output; also note that a box counts as two points)
+ multimask_min_pt_num=1,
+ multimask_max_pt_num=1,
+ # whether to also use multimask output for tracking (not just for the first click on initial conditioning frames; only relevant when `multimask_output_in_sam=True`)
+ multimask_output_for_tracking=False,
+ # Whether to use multimask tokens for obj ptr; Only relevant when both
+ # use_obj_ptrs_in_encoder=True and multimask_output_for_tracking=True
+ use_multimask_token_for_obj_ptr: bool = False,
+        # whether to use sigmoid to restrict the IoU prediction to [0, 1]
+ iou_prediction_use_sigmoid=False,
+ # The memory bank's temporal stride during evaluation (i.e. the `r` parameter in XMem and Cutie; XMem and Cutie use r=5).
+ # For r>1, the (self.num_maskmem - 1) non-conditioning memory frames consist of
+ # (self.num_maskmem - 2) nearest frames from every r-th frames, plus the last frame.
+ memory_temporal_stride_for_eval=1,
+ # whether to apply non-overlapping constraints on the object masks in the memory encoder during evaluation (to avoid/alleviate superposing masks)
+ non_overlap_masks_for_mem_enc=False,
+ # whether to cross-attend to object pointers from other frames (based on SAM output tokens) in the encoder
+ use_obj_ptrs_in_encoder=False,
+ # the maximum number of object pointers from other frames in encoder cross attention (only relevant when `use_obj_ptrs_in_encoder=True`)
+ max_obj_ptrs_in_encoder=16,
+ # whether to add temporal positional encoding to the object pointers in the encoder (only relevant when `use_obj_ptrs_in_encoder=True`)
+ add_tpos_enc_to_obj_ptrs=True,
+ # whether to add an extra linear projection layer for the temporal positional encoding in the object pointers to avoid potential interference
+ # with spatial positional encoding (only relevant when both `use_obj_ptrs_in_encoder=True` and `add_tpos_enc_to_obj_ptrs=True`)
+ proj_tpos_enc_in_obj_ptrs=False,
+ # whether to use signed distance (instead of unsigned absolute distance) in the temporal positional encoding in the object pointers
+ # (only relevant when both `use_obj_ptrs_in_encoder=True` and `add_tpos_enc_to_obj_ptrs=True`)
+ use_signed_tpos_enc_to_obj_ptrs=False,
+ # whether to only attend to object pointers in the past (before the current frame) in the encoder during evaluation
+        # (only relevant when `use_obj_ptrs_in_encoder=True`; this can prevent pointer information from too far in the future from distracting the initial tracking)
+ only_obj_ptrs_in_the_past_for_eval=False,
+ # Whether to predict if there is an object in the frame
+ pred_obj_scores: bool = False,
+ # Whether to use an MLP to predict object scores
+ pred_obj_scores_mlp: bool = False,
+ # Only relevant if pred_obj_scores=True and use_obj_ptrs_in_encoder=True;
+ # Whether to have a fixed no obj pointer when there is no object present
+ # or to use it as an additive embedding with obj_ptr produced by decoder
+ fixed_no_obj_ptr: bool = False,
+ # Soft no object, i.e. mix in no_obj_ptr softly,
+        # hoping to make recovery easier after a mistake and to mitigate error accumulation
+ soft_no_obj_ptr: bool = False,
+ use_mlp_for_obj_ptr_proj: bool = False,
+ # add no obj embedding to spatial frames
+ no_obj_embed_spatial: bool = False,
+ # extra arguments used to construct the SAM mask decoder; if not None, it should be a dict of kwargs to be passed into `MaskDecoder` class.
+ sam_mask_decoder_extra_args=None,
+ compile_image_encoder: bool = False,
+ ):
+ super().__init__()
+
+ # Part 1: the image backbone
+ self.image_encoder = image_encoder
+ # Use level 0, 1, 2 for high-res setting, or just level 2 for the default setting
+ self.use_high_res_features_in_sam = use_high_res_features_in_sam
+ self.num_feature_levels = 3 if use_high_res_features_in_sam else 1
+ self.use_obj_ptrs_in_encoder = use_obj_ptrs_in_encoder
+ self.max_obj_ptrs_in_encoder = max_obj_ptrs_in_encoder
+ if use_obj_ptrs_in_encoder:
+ # A conv layer to downsample the mask prompt to stride 4 (the same stride as
+ # low-res SAM mask logits) and to change its scales from 0~1 to SAM logit scale,
+ # so that it can be fed into the SAM mask decoder to generate a pointer.
+ self.mask_downsample = torch.nn.Conv2d(1, 1, kernel_size=4, stride=4)
+ self.add_tpos_enc_to_obj_ptrs = add_tpos_enc_to_obj_ptrs
+ if proj_tpos_enc_in_obj_ptrs:
+ assert add_tpos_enc_to_obj_ptrs # these options need to be used together
+ self.proj_tpos_enc_in_obj_ptrs = proj_tpos_enc_in_obj_ptrs
+ self.use_signed_tpos_enc_to_obj_ptrs = use_signed_tpos_enc_to_obj_ptrs
+ self.only_obj_ptrs_in_the_past_for_eval = only_obj_ptrs_in_the_past_for_eval
+
+ # Part 2: memory attention to condition current frame's visual features
+ # with memories (and obj ptrs) from past frames
+ self.memory_attention = memory_attention
+ self.hidden_dim = image_encoder.neck.d_model
+
+ # Part 3: memory encoder for the previous frame's outputs
+ self.memory_encoder = memory_encoder
+ self.mem_dim = self.hidden_dim
+ if hasattr(self.memory_encoder, "out_proj") and hasattr(self.memory_encoder.out_proj, "weight"):
+ # if there is compression of memories along channel dim
+ self.mem_dim = self.memory_encoder.out_proj.weight.shape[0]
+ self.num_maskmem = num_maskmem # Number of memories accessible
+ # Temporal encoding of the memories
+ self.maskmem_tpos_enc = torch.nn.Parameter(torch.zeros(num_maskmem, 1, 1, self.mem_dim))
+ trunc_normal_(self.maskmem_tpos_enc, std=0.02)
+ # a single token to indicate no memory embedding from previous frames
+ self.no_mem_embed = torch.nn.Parameter(torch.zeros(1, 1, self.hidden_dim))
+ self.no_mem_pos_enc = torch.nn.Parameter(torch.zeros(1, 1, self.hidden_dim))
+ trunc_normal_(self.no_mem_embed, std=0.02)
+ trunc_normal_(self.no_mem_pos_enc, std=0.02)
+ self.directly_add_no_mem_embed = directly_add_no_mem_embed
+ # Apply sigmoid to the output raw mask logits (to turn them from
+ # range (-inf, +inf) to range (0, 1)) before feeding them into the memory encoder
+ self.sigmoid_scale_for_mem_enc = sigmoid_scale_for_mem_enc
+ self.sigmoid_bias_for_mem_enc = sigmoid_bias_for_mem_enc
+ self.binarize_mask_from_pts_for_mem_enc = binarize_mask_from_pts_for_mem_enc
+ self.non_overlap_masks_for_mem_enc = non_overlap_masks_for_mem_enc
+ self.memory_temporal_stride_for_eval = memory_temporal_stride_for_eval
+ # On frames with mask input, whether to directly output the input mask without
+ # using a SAM prompt encoder + mask decoder
+ self.use_mask_input_as_output_without_sam = use_mask_input_as_output_without_sam
+ self.multimask_output_in_sam = multimask_output_in_sam
+ self.multimask_min_pt_num = multimask_min_pt_num
+ self.multimask_max_pt_num = multimask_max_pt_num
+ self.multimask_output_for_tracking = multimask_output_for_tracking
+ self.use_multimask_token_for_obj_ptr = use_multimask_token_for_obj_ptr
+ self.iou_prediction_use_sigmoid = iou_prediction_use_sigmoid
+
+ # Part 4: SAM-style prompt encoder (for both mask and point inputs)
+ # and SAM-style mask decoder for the final mask output
+ self.image_size = image_size
+ self.backbone_stride = backbone_stride
+ self.sam_mask_decoder_extra_args = sam_mask_decoder_extra_args
+ self.pred_obj_scores = pred_obj_scores
+ self.pred_obj_scores_mlp = pred_obj_scores_mlp
+ self.fixed_no_obj_ptr = fixed_no_obj_ptr
+ self.soft_no_obj_ptr = soft_no_obj_ptr
+ if self.fixed_no_obj_ptr:
+ assert self.pred_obj_scores
+ assert self.use_obj_ptrs_in_encoder
+ if self.pred_obj_scores and self.use_obj_ptrs_in_encoder:
+ self.no_obj_ptr = torch.nn.Parameter(torch.zeros(1, self.hidden_dim))
+ trunc_normal_(self.no_obj_ptr, std=0.02)
+ self.use_mlp_for_obj_ptr_proj = use_mlp_for_obj_ptr_proj
+ self.no_obj_embed_spatial = None
+ if no_obj_embed_spatial:
+ self.no_obj_embed_spatial = torch.nn.Parameter(torch.zeros(1, self.mem_dim))
+ trunc_normal_(self.no_obj_embed_spatial, std=0.02)
+
+ self._build_sam_heads()
+ self.max_cond_frames_in_attn = max_cond_frames_in_attn
+
+ # Model compilation
+ if compile_image_encoder:
+ # Compile the forward function (not the full module) to allow loading checkpoints.
+ print("Image encoder compilation is enabled. First forward pass will be slow.")
+ self.image_encoder.forward = torch.compile(
+ self.image_encoder.forward,
+ mode="max-autotune",
+ fullgraph=True,
+ dynamic=False,
+ )
+
+ @property
+ def device(self):
+ return next(self.parameters()).device
+
+ def forward(self, *args, **kwargs):
+ raise NotImplementedError(
+            "Please use the corresponding methods in SAM2VideoPredictor for inference or SAM2Train for training/fine-tuning. "
+ "See notebooks/video_predictor_example.ipynb for an inference example."
+ )
+
+ def _build_sam_heads(self):
+ """Build SAM-style prompt encoder and mask decoder."""
+ self.sam_prompt_embed_dim = self.hidden_dim
+ self.sam_image_embedding_size = self.image_size // self.backbone_stride
+
+ # build PromptEncoder and MaskDecoder from SAM
+ # (their hyperparameters like `mask_in_chans=16` are from SAM code)
+ self.sam_prompt_encoder = PromptEncoder(
+ embed_dim=self.sam_prompt_embed_dim,
+ image_embedding_size=(
+ self.sam_image_embedding_size,
+ self.sam_image_embedding_size,
+ ),
+ input_image_size=(self.image_size, self.image_size),
+ mask_in_chans=16,
+ )
+ self.sam_mask_decoder = MaskDecoder(
+ num_multimask_outputs=3,
+ transformer=TwoWayTransformer(
+ depth=2,
+ embedding_dim=self.sam_prompt_embed_dim,
+ mlp_dim=2048,
+ num_heads=8,
+ ),
+ transformer_dim=self.sam_prompt_embed_dim,
+ iou_head_depth=3,
+ iou_head_hidden_dim=256,
+ use_high_res_features=self.use_high_res_features_in_sam,
+ iou_prediction_use_sigmoid=self.iou_prediction_use_sigmoid,
+ pred_obj_scores=self.pred_obj_scores,
+ pred_obj_scores_mlp=self.pred_obj_scores_mlp,
+ use_multimask_token_for_obj_ptr=self.use_multimask_token_for_obj_ptr,
+ **(self.sam_mask_decoder_extra_args or {}),
+ )
+ if self.use_obj_ptrs_in_encoder:
+ # a linear projection on SAM output tokens to turn them into object pointers
+ self.obj_ptr_proj = torch.nn.Linear(self.hidden_dim, self.hidden_dim)
+ if self.use_mlp_for_obj_ptr_proj:
+ self.obj_ptr_proj = MLP(self.hidden_dim, self.hidden_dim, self.hidden_dim, 3)
+ else:
+ self.obj_ptr_proj = torch.nn.Identity()
+ if self.proj_tpos_enc_in_obj_ptrs:
+ # a linear projection on temporal positional encoding in object pointers to
+ # avoid potential interference with spatial positional encoding
+ self.obj_ptr_tpos_proj = torch.nn.Linear(self.hidden_dim, self.mem_dim)
+ else:
+ self.obj_ptr_tpos_proj = torch.nn.Identity()
+
+ def _forward_sam_heads(
+ self,
+ backbone_features,
+ point_inputs=None,
+ mask_inputs=None,
+ high_res_features=None,
+ multimask_output=False,
+ ):
+ """
+ Forward SAM prompt encoders and mask heads.
+
+ Inputs:
+ - backbone_features: image features of [B, C, H, W] shape
+ - point_inputs: a dictionary with "point_coords" and "point_labels", where
+ 1) "point_coords" has [B, P, 2] shape and float32 dtype and contains the
+ absolute pixel-unit coordinate in (x, y) format of the P input points
+ 2) "point_labels" has shape [B, P] and int32 dtype, where 1 means
+ positive clicks, 0 means negative clicks, and -1 means padding
+ - mask_inputs: a mask of [B, 1, H*16, W*16] shape, float or bool, with the
+ same spatial size as the image.
+          - high_res_features: either 1) None or 2) a list of length 2 containing
+ two feature maps of [B, C, 4*H, 4*W] and [B, C, 2*H, 2*W] shapes respectively,
+ which will be used as high-resolution feature maps for SAM decoder.
+ - multimask_output: if it's True, we output 3 candidate masks and their 3
+ corresponding IoU estimates, and if it's False, we output only 1 mask and
+ its corresponding IoU estimate.
+
+ Outputs:
+ - low_res_multimasks: [B, M, H*4, W*4] shape (where M = 3 if
+ `multimask_output=True` and M = 1 if `multimask_output=False`), the SAM
+ output mask logits (before sigmoid) for the low-resolution masks, with 4x
+ the resolution (1/4 stride) of the input backbone_features.
+ - high_res_multimasks: [B, M, H*16, W*16] shape (where M = 3
+ if `multimask_output=True` and M = 1 if `multimask_output=False`),
+            upsampled from the low-resolution masks, with the same size as the image
+ (stride is 1 pixel).
+          - ious: [B, M] shape (where M = 3 if `multimask_output=True` and M = 1
+ if `multimask_output=False`), the estimated IoU of each output mask.
+ - low_res_masks: [B, 1, H*4, W*4] shape, the best mask in `low_res_multimasks`.
+ If `multimask_output=True`, it's the mask with the highest IoU estimate.
+ If `multimask_output=False`, it's the same as `low_res_multimasks`.
+ - high_res_masks: [B, 1, H*16, W*16] shape, the best mask in `high_res_multimasks`.
+ If `multimask_output=True`, it's the mask with the highest IoU estimate.
+ If `multimask_output=False`, it's the same as `high_res_multimasks`.
+ - obj_ptr: [B, C] shape, the object pointer vector for the output mask, extracted
+ based on the output token from the SAM mask decoder.
+ """
+ B = backbone_features.size(0)
+ device = backbone_features.device
+ assert backbone_features.size(1) == self.sam_prompt_embed_dim
+ assert backbone_features.size(2) == self.sam_image_embedding_size
+ assert backbone_features.size(3) == self.sam_image_embedding_size
+
+ # a) Handle point prompts
+ if point_inputs is not None:
+ sam_point_coords = point_inputs["point_coords"]
+ sam_point_labels = point_inputs["point_labels"]
+ assert sam_point_coords.size(0) == B and sam_point_labels.size(0) == B
+ else:
+            # If no points are provided, pad with an empty point (with label -1)
+ sam_point_coords = torch.zeros(B, 1, 2, device=device)
+ sam_point_labels = -torch.ones(B, 1, dtype=torch.int32, device=device)
+
+ # b) Handle mask prompts
+ if mask_inputs is not None:
+ # If mask_inputs is provided, downsize it into low-res mask input if needed
+ # and feed it as a dense mask prompt into the SAM mask encoder
+ assert len(mask_inputs.shape) == 4 and mask_inputs.shape[:2] == (B, 1)
+ if mask_inputs.shape[-2:] != self.sam_prompt_encoder.mask_input_size:
+ sam_mask_prompt = F.interpolate(
+ mask_inputs.float(),
+ size=self.sam_prompt_encoder.mask_input_size,
+ align_corners=False,
+ mode="bilinear",
+ antialias=True, # use antialias for downsampling
+ )
+ else:
+ sam_mask_prompt = mask_inputs
+ else:
+ # Otherwise, simply feed None (and SAM's prompt encoder will add
+ # a learned `no_mask_embed` to indicate no mask input in this case).
+ sam_mask_prompt = None
+
+ sparse_embeddings, dense_embeddings = self.sam_prompt_encoder(
+ points=(sam_point_coords, sam_point_labels),
+ boxes=None,
+ masks=sam_mask_prompt,
+ )
+ (
+ low_res_multimasks,
+ ious,
+ sam_output_tokens,
+ object_score_logits,
+ ) = self.sam_mask_decoder(
+ image_embeddings=backbone_features,
+ image_pe=self.sam_prompt_encoder.get_dense_pe(),
+ sparse_prompt_embeddings=sparse_embeddings,
+ dense_prompt_embeddings=dense_embeddings,
+ multimask_output=multimask_output,
+ repeat_image=False, # the image is already batched
+ high_res_features=high_res_features,
+ )
+ if self.pred_obj_scores:
+ is_obj_appearing = object_score_logits > 0
+
+ # Mask used for spatial memories is always a *hard* choice between obj and no obj,
+ # consistent with the actual mask prediction
+ low_res_multimasks = torch.where(
+ is_obj_appearing[:, None, None],
+ low_res_multimasks,
+ NO_OBJ_SCORE,
+ )
+
+ # convert masks from possibly bfloat16 (or float16) to float32
+ # (older PyTorch versions before 2.1 don't support `interpolate` on bf16)
+ low_res_multimasks = low_res_multimasks.float()
+ high_res_multimasks = F.interpolate(
+ low_res_multimasks,
+ size=(self.image_size, self.image_size),
+ mode="bilinear",
+ align_corners=False,
+ )
+
+ sam_output_token = sam_output_tokens[:, 0]
+ if multimask_output:
+ # take the best mask prediction (with the highest IoU estimation)
+ best_iou_inds = torch.argmax(ious, dim=-1)
+ batch_inds = torch.arange(B, device=device)
+ low_res_masks = low_res_multimasks[batch_inds, best_iou_inds].unsqueeze(1)
+ high_res_masks = high_res_multimasks[batch_inds, best_iou_inds].unsqueeze(1)
+ if sam_output_tokens.size(1) > 1:
+ sam_output_token = sam_output_tokens[batch_inds, best_iou_inds]
+ else:
+ low_res_masks, high_res_masks = low_res_multimasks, high_res_multimasks
+
+ # Extract object pointer from the SAM output token (with occlusion handling)
+ obj_ptr = self.obj_ptr_proj(sam_output_token)
+ if self.pred_obj_scores:
+ # Allow *soft* no obj ptr, unlike for masks
+ if self.soft_no_obj_ptr:
+ lambda_is_obj_appearing = object_score_logits.sigmoid()
+ else:
+ lambda_is_obj_appearing = is_obj_appearing.float()
+
+ if self.fixed_no_obj_ptr:
+ obj_ptr = lambda_is_obj_appearing * obj_ptr
+ obj_ptr = obj_ptr + (1 - lambda_is_obj_appearing) * self.no_obj_ptr
+
+ return (
+ low_res_multimasks,
+ high_res_multimasks,
+ ious,
+ low_res_masks,
+ high_res_masks,
+ obj_ptr,
+ object_score_logits,
+ )
+
+ def _use_mask_as_output(self, backbone_features, high_res_features, mask_inputs):
+ """
+        Directly turn binary `mask_inputs` into output mask logits without using SAM.
+ (same input and output shapes as in _forward_sam_heads above).
+ """
+ # Use -10/+10 as logits for neg/pos pixels (very close to 0/1 in prob after sigmoid).
+ out_scale, out_bias = 20.0, -10.0 # sigmoid(-10.0)=4.5398e-05
+ mask_inputs_float = mask_inputs.float()
+ high_res_masks = mask_inputs_float * out_scale + out_bias
+ low_res_masks = F.interpolate(
+ high_res_masks,
+ size=(high_res_masks.size(-2) // 4, high_res_masks.size(-1) // 4),
+ align_corners=False,
+ mode="bilinear",
+ antialias=True, # use antialias for downsampling
+ )
+ # a dummy IoU prediction of all 1's under mask input
+ ious = mask_inputs.new_ones(mask_inputs.size(0), 1).float()
+ if not self.use_obj_ptrs_in_encoder:
+ # all zeros as a dummy object pointer (of shape [B, C])
+ obj_ptr = torch.zeros(mask_inputs.size(0), self.hidden_dim, device=mask_inputs.device)
+ else:
+ # produce an object pointer using the SAM decoder from the mask input
+ _, _, _, _, _, obj_ptr, _ = self._forward_sam_heads(
+ backbone_features=backbone_features,
+ mask_inputs=self.mask_downsample(mask_inputs_float),
+ high_res_features=high_res_features,
+ )
+        # In this method, we treat mask_input as the output, e.g. using it directly to create the spatial memory;
+        # below, we follow the same design axiom and use mask_input to decide whether the object appears,
+        # instead of relying on the object_scores from the SAM decoder.
+ is_obj_appearing = torch.any(mask_inputs.flatten(1).float() > 0.0, dim=1)
+ is_obj_appearing = is_obj_appearing[..., None]
+ lambda_is_obj_appearing = is_obj_appearing.float()
+ object_score_logits = out_scale * lambda_is_obj_appearing + out_bias
+ if self.pred_obj_scores:
+ if self.fixed_no_obj_ptr:
+ obj_ptr = lambda_is_obj_appearing * obj_ptr
+ obj_ptr = obj_ptr + (1 - lambda_is_obj_appearing) * self.no_obj_ptr
+
+ return (
+ low_res_masks,
+ high_res_masks,
+ ious,
+ low_res_masks,
+ high_res_masks,
+ obj_ptr,
+ object_score_logits,
+ )
+
+ def forward_image(self, img_batch: torch.Tensor):
+ """Get the image feature on the input batch."""
+ backbone_out = self.image_encoder(img_batch)
+ if self.use_high_res_features_in_sam:
+ # precompute projected level 0 and level 1 features in SAM decoder
+ # to avoid running it again on every SAM click
+ backbone_out["backbone_fpn"][0] = self.sam_mask_decoder.conv_s0(backbone_out["backbone_fpn"][0])
+ backbone_out["backbone_fpn"][1] = self.sam_mask_decoder.conv_s1(backbone_out["backbone_fpn"][1])
+ return backbone_out
+
+ def _prepare_backbone_features(self, backbone_out):
+ """Prepare and flatten visual features."""
+ backbone_out = backbone_out.copy()
+ assert len(backbone_out["backbone_fpn"]) == len(backbone_out["vision_pos_enc"])
+ assert len(backbone_out["backbone_fpn"]) >= self.num_feature_levels
+
+ feature_maps = backbone_out["backbone_fpn"][-self.num_feature_levels :]
+ vision_pos_embeds = backbone_out["vision_pos_enc"][-self.num_feature_levels :]
+
+ feat_sizes = [(x.shape[-2], x.shape[-1]) for x in vision_pos_embeds]
+ # flatten NxCxHxW to HWxNxC
+ vision_feats = [x.flatten(2).permute(2, 0, 1) for x in feature_maps]
+ vision_pos_embeds = [x.flatten(2).permute(2, 0, 1) for x in vision_pos_embeds]
+
+ return backbone_out, vision_feats, vision_pos_embeds, feat_sizes
+
+ def _prepare_memory_conditioned_features(
+ self,
+ frame_idx,
+ is_init_cond_frame,
+ current_vision_feats,
+ current_vision_pos_embeds,
+ feat_sizes,
+ output_dict,
+ num_frames,
+ track_in_reverse=False, # tracking in reverse time order (for demo usage)
+ ):
+ """Fuse the current frame's visual feature map with previous memory."""
+ B = current_vision_feats[-1].size(1) # batch size on this frame
+ C = self.hidden_dim
+ H, W = feat_sizes[-1] # top-level (lowest-resolution) feature size
+ device = current_vision_feats[-1].device
+ # The case of `self.num_maskmem == 0` below is primarily used for reproducing SAM on images.
+ # In this case, we skip the fusion with any memory.
+ if self.num_maskmem == 0: # Disable memory and skip fusion
+ pix_feat = current_vision_feats[-1].permute(1, 2, 0).view(B, C, H, W)
+ return pix_feat
+
+ num_obj_ptr_tokens = 0
+ tpos_sign_mul = -1 if track_in_reverse else 1
+ # Step 1: condition the visual features of the current frame on previous memories
+ if not is_init_cond_frame:
+ # Retrieve the memories encoded with the maskmem backbone
+ to_cat_memory, to_cat_memory_pos_embed = [], []
+            # Add the conditioning frames' outputs first (all cond frames have t_pos=0
+            # when getting the temporal positional embedding below)
+ assert len(output_dict["cond_frame_outputs"]) > 0
+ # Select a maximum number of temporally closest cond frames for cross attention
+ cond_outputs = output_dict["cond_frame_outputs"]
+ selected_cond_outputs, unselected_cond_outputs = select_closest_cond_frames(
+ frame_idx, cond_outputs, self.max_cond_frames_in_attn
+ )
+ t_pos_and_prevs = [(0, out) for out in selected_cond_outputs.values()]
+ # Add last (self.num_maskmem - 1) frames before current frame for non-conditioning memory
+ # the earliest one has t_pos=1 and the latest one has t_pos=self.num_maskmem-1
+ # We also allow taking the memory frame non-consecutively (with stride>1), in which case
+ # we take (self.num_maskmem - 2) frames among every stride-th frames plus the last frame.
+ stride = 1 if self.training else self.memory_temporal_stride_for_eval
+ for t_pos in range(1, self.num_maskmem):
+ t_rel = self.num_maskmem - t_pos # how many frames before current frame
+ if t_rel == 1:
+ # for t_rel == 1, we take the last frame (regardless of r)
+ if not track_in_reverse:
+ # the frame immediately before this frame (i.e. frame_idx - 1)
+ prev_frame_idx = frame_idx - t_rel
+ else:
+ # the frame immediately after this frame (i.e. frame_idx + 1)
+ prev_frame_idx = frame_idx + t_rel
+ else:
+ # for t_rel >= 2, we take the memory frame from every r-th frames
+ if not track_in_reverse:
+ # first find the nearest frame among every r-th frames before this frame
+ # for r=1, this would be (frame_idx - 2)
+ prev_frame_idx = ((frame_idx - 2) // stride) * stride
+ # then seek further among every r-th frames
+ prev_frame_idx = prev_frame_idx - (t_rel - 2) * stride
+ else:
+ # first find the nearest frame among every r-th frames after this frame
+ # for r=1, this would be (frame_idx + 2)
+ prev_frame_idx = -(-(frame_idx + 2) // stride) * stride
+ # then seek further among every r-th frames
+ prev_frame_idx = prev_frame_idx + (t_rel - 2) * stride
+ out = output_dict["non_cond_frame_outputs"].get(prev_frame_idx, None)
+ if out is None:
+ # If an unselected conditioning frame is among the last (self.num_maskmem - 1)
+ # frames, we still attend to it as if it's a non-conditioning frame.
+ out = unselected_cond_outputs.get(prev_frame_idx, None)
+ t_pos_and_prevs.append((t_pos, out))
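+            # Worked example (assumed values, tracking forward): with num_maskmem=7,
+            # stride r=5, and frame_idx=100, t_rel runs over 1..6 and the selected
+            # non-conditioning memory frames are {99, 95, 90, 85, 80, 75}: the last
+            # frame (99) is always taken, and the rest come from every 5th frame.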
+
+ for t_pos, prev in t_pos_and_prevs:
+ if prev is None:
+ continue # skip padding frames
+ # "maskmem_features" might have been offloaded to CPU in demo use cases,
+ # so we load it back to GPU (it's a no-op if it's already on GPU).
+ feats = prev["maskmem_features"].to(device, non_blocking=True)
+ to_cat_memory.append(feats.flatten(2).permute(2, 0, 1))
+ # Spatial positional encoding (it might have been offloaded to CPU in eval)
+ maskmem_enc = prev["maskmem_pos_enc"][-1].to(device)
+ maskmem_enc = maskmem_enc.flatten(2).permute(2, 0, 1)
+ # Temporal positional encoding
+ maskmem_enc = maskmem_enc + self.maskmem_tpos_enc[self.num_maskmem - t_pos - 1]
+ to_cat_memory_pos_embed.append(maskmem_enc)
+
+ # Construct the list of past object pointers
+ if self.use_obj_ptrs_in_encoder:
+ max_obj_ptrs_in_encoder = min(num_frames, self.max_obj_ptrs_in_encoder)
+ # First add those object pointers from selected conditioning frames
+ # (optionally, only include object pointers in the past during evaluation)
+ if not self.training and self.only_obj_ptrs_in_the_past_for_eval:
+ ptr_cond_outputs = {
+ t: out
+ for t, out in selected_cond_outputs.items()
+ if (t >= frame_idx if track_in_reverse else t <= frame_idx)
+ }
+ else:
+ ptr_cond_outputs = selected_cond_outputs
+ pos_and_ptrs = [
+ # Temporal pos encoding contains how far away each pointer is from current frame
+ (
+ (
+ (frame_idx - t) * tpos_sign_mul
+ if self.use_signed_tpos_enc_to_obj_ptrs
+ else abs(frame_idx - t)
+ ),
+ out["obj_ptr"],
+ )
+ for t, out in ptr_cond_outputs.items()
+ ]
+ # Add up to (max_obj_ptrs_in_encoder - 1) non-conditioning frames before current frame
+ for t_diff in range(1, max_obj_ptrs_in_encoder):
+ t = frame_idx + t_diff if track_in_reverse else frame_idx - t_diff
+ if t < 0 or (num_frames is not None and t >= num_frames):
+ break
+ out = output_dict["non_cond_frame_outputs"].get(t, unselected_cond_outputs.get(t, None))
+ if out is not None:
+ pos_and_ptrs.append((t_diff, out["obj_ptr"]))
+                # If we have at least one object pointer, add them to the cross attention
+ if len(pos_and_ptrs) > 0:
+ pos_list, ptrs_list = zip(*pos_and_ptrs)
+ # stack object pointers along dim=0 into [ptr_seq_len, B, C] shape
+ obj_ptrs = torch.stack(ptrs_list, dim=0)
+ # a temporal positional embedding based on how far each object pointer is from
+ # the current frame (sine embedding normalized by the max pointer num).
+ if self.add_tpos_enc_to_obj_ptrs:
+ t_diff_max = max_obj_ptrs_in_encoder - 1
+ tpos_dim = C if self.proj_tpos_enc_in_obj_ptrs else self.mem_dim
+ obj_pos = torch.tensor(pos_list, device=device)
+ obj_pos = get_1d_sine_pe(obj_pos / t_diff_max, dim=tpos_dim)
+ obj_pos = self.obj_ptr_tpos_proj(obj_pos)
+ obj_pos = obj_pos.unsqueeze(1).expand(-1, B, self.mem_dim)
+ else:
+ obj_pos = obj_ptrs.new_zeros(len(pos_list), B, self.mem_dim)
+ if self.mem_dim < C:
+ # split a pointer into (C // self.mem_dim) tokens for self.mem_dim < C
+ obj_ptrs = obj_ptrs.reshape(-1, B, C // self.mem_dim, self.mem_dim)
+ obj_ptrs = obj_ptrs.permute(0, 2, 1, 3).flatten(0, 1)
+ obj_pos = obj_pos.repeat_interleave(C // self.mem_dim, dim=0)
+ to_cat_memory.append(obj_ptrs)
+ to_cat_memory_pos_embed.append(obj_pos)
+ num_obj_ptr_tokens = obj_ptrs.shape[0]
+ else:
+ num_obj_ptr_tokens = 0
+ else:
+ # for initial conditioning frames, encode them without using any previous memory
+ if self.directly_add_no_mem_embed:
+ # directly add no-mem embedding (instead of using the transformer encoder)
+ pix_feat_with_mem = current_vision_feats[-1] + self.no_mem_embed
+ pix_feat_with_mem = pix_feat_with_mem.permute(1, 2, 0).view(B, C, H, W)
+ return pix_feat_with_mem
+
+            # Use a dummy token on the first frame (to avoid empty memory input to the transformer encoder)
+ to_cat_memory = [self.no_mem_embed.expand(1, B, self.mem_dim)]
+ to_cat_memory_pos_embed = [self.no_mem_pos_enc.expand(1, B, self.mem_dim)]
+
+ # Step 2: Concatenate the memories and forward through the transformer encoder
+ memory = torch.cat(to_cat_memory, dim=0)
+ memory_pos_embed = torch.cat(to_cat_memory_pos_embed, dim=0)
+
+ pix_feat_with_mem = self.memory_attention(
+ curr=current_vision_feats,
+ curr_pos=current_vision_pos_embeds,
+ memory=memory,
+ memory_pos=memory_pos_embed,
+ num_obj_ptr_tokens=num_obj_ptr_tokens,
+ )
+ # reshape the output (HW)BC => BCHW
+ pix_feat_with_mem = pix_feat_with_mem.permute(1, 2, 0).view(B, C, H, W)
+ return pix_feat_with_mem
+
+ def _encode_new_memory(
+ self,
+ current_vision_feats,
+ feat_sizes,
+ pred_masks_high_res,
+ object_score_logits,
+ is_mask_from_pts,
+ ):
+ """Encode the current image and its prediction into a memory feature."""
+ B = current_vision_feats[-1].size(1) # batch size on this frame
+ C = self.hidden_dim
+ H, W = feat_sizes[-1] # top-level (lowest-resolution) feature size
+ # top-level feature, (HW)BC => BCHW
+ pix_feat = current_vision_feats[-1].permute(1, 2, 0).view(B, C, H, W)
+ if self.non_overlap_masks_for_mem_enc and not self.training:
+ # optionally, apply non-overlapping constraints to the masks (it's applied
+ # in the batch dimension and should only be used during eval, where all
+ # the objects come from the same video under batch size 1).
+ pred_masks_high_res = self._apply_non_overlapping_constraints(pred_masks_high_res)
+ # scale the raw mask logits with a temperature before applying sigmoid
+ binarize = self.binarize_mask_from_pts_for_mem_enc and is_mask_from_pts
+ if binarize and not self.training:
+ mask_for_mem = (pred_masks_high_res > 0).float()
+ else:
+ # apply sigmoid on the raw mask logits to turn them into range (0, 1)
+ mask_for_mem = torch.sigmoid(pred_masks_high_res)
+ # apply scale and bias terms to the sigmoid probabilities
+ if self.sigmoid_scale_for_mem_enc != 1.0:
+ mask_for_mem = mask_for_mem * self.sigmoid_scale_for_mem_enc
+ if self.sigmoid_bias_for_mem_enc != 0.0:
+ mask_for_mem = mask_for_mem + self.sigmoid_bias_for_mem_enc
+ maskmem_out = self.memory_encoder(pix_feat, mask_for_mem, skip_mask_sigmoid=True) # sigmoid already applied
+ maskmem_features = maskmem_out["vision_features"]
+ maskmem_pos_enc = maskmem_out["vision_pos_enc"]
+ # add a no-object embedding to the spatial memory to indicate that the frame
+ # is predicted to be occluded (i.e. no object is appearing in the frame)
+ if self.no_obj_embed_spatial is not None:
+ is_obj_appearing = (object_score_logits > 0).float()
+ maskmem_features += (1 - is_obj_appearing[..., None, None]) * self.no_obj_embed_spatial[
+ ..., None, None
+ ].expand(*maskmem_features.shape)
+
+ return maskmem_features, maskmem_pos_enc
+
+ def _track_step(
+ self,
+ frame_idx,
+ is_init_cond_frame,
+ current_vision_feats,
+ current_vision_pos_embeds,
+ feat_sizes,
+ point_inputs,
+ mask_inputs,
+ output_dict,
+ num_frames,
+ track_in_reverse,
+ prev_sam_mask_logits,
+ ):
+ current_out = {"point_inputs": point_inputs, "mask_inputs": mask_inputs}
+ # High-resolution feature maps for the SAM head, reshape (HW)BC => BCHW
+ if len(current_vision_feats) > 1:
+ high_res_features = [
+ x.permute(1, 2, 0).view(x.size(1), x.size(2), *s)
+ for x, s in zip(current_vision_feats[:-1], feat_sizes[:-1])
+ ]
+ else:
+ high_res_features = None
+ if mask_inputs is not None and self.use_mask_input_as_output_without_sam:
+ # When use_mask_input_as_output_without_sam=True, we directly output the mask input
+ # (see it as a GT mask) without using a SAM prompt encoder + mask decoder.
+ pix_feat = current_vision_feats[-1].permute(1, 2, 0)
+ pix_feat = pix_feat.view(-1, self.hidden_dim, *feat_sizes[-1])
+ sam_outputs = self._use_mask_as_output(pix_feat, high_res_features, mask_inputs)
+ else:
+ # fused the visual feature with previous memory features in the memory bank
+ pix_feat = self._prepare_memory_conditioned_features(
+ frame_idx=frame_idx,
+ is_init_cond_frame=is_init_cond_frame,
+ current_vision_feats=current_vision_feats[-1:],
+ current_vision_pos_embeds=current_vision_pos_embeds[-1:],
+ feat_sizes=feat_sizes[-1:],
+ output_dict=output_dict,
+ num_frames=num_frames,
+ track_in_reverse=track_in_reverse,
+ )
+ # apply SAM-style segmentation head
+ # here we might feed previously predicted low-res SAM mask logits into the SAM mask decoder,
+ # e.g. in demo where such logits come from earlier interaction instead of correction sampling
+ # (in this case, any `mask_inputs` shouldn't reach here as they are sent to _use_mask_as_output instead)
+ if prev_sam_mask_logits is not None:
+ assert point_inputs is not None and mask_inputs is None
+ mask_inputs = prev_sam_mask_logits
+ multimask_output = self._use_multimask(is_init_cond_frame, point_inputs)
+ sam_outputs = self._forward_sam_heads(
+ backbone_features=pix_feat,
+ point_inputs=point_inputs,
+ mask_inputs=mask_inputs,
+ high_res_features=high_res_features,
+ multimask_output=multimask_output,
+ )
+
+ return current_out, sam_outputs, high_res_features, pix_feat
+
+ def _encode_memory_in_output(
+ self,
+ current_vision_feats,
+ feat_sizes,
+ point_inputs,
+ run_mem_encoder,
+ high_res_masks,
+ object_score_logits,
+ current_out,
+ ):
+ if run_mem_encoder and self.num_maskmem > 0:
+ high_res_masks_for_mem_enc = high_res_masks
+ maskmem_features, maskmem_pos_enc = self._encode_new_memory(
+ current_vision_feats=current_vision_feats,
+ feat_sizes=feat_sizes,
+ pred_masks_high_res=high_res_masks_for_mem_enc,
+ object_score_logits=object_score_logits,
+ is_mask_from_pts=(point_inputs is not None),
+ )
+ current_out["maskmem_features"] = maskmem_features
+ current_out["maskmem_pos_enc"] = maskmem_pos_enc
+ else:
+ current_out["maskmem_features"] = None
+ current_out["maskmem_pos_enc"] = None
+
+ def track_step(
+ self,
+ frame_idx,
+ is_init_cond_frame,
+ current_vision_feats,
+ current_vision_pos_embeds,
+ feat_sizes,
+ point_inputs,
+ mask_inputs,
+ output_dict,
+ num_frames,
+ track_in_reverse=False, # tracking in reverse time order (for demo usage)
+ # Whether to run the memory encoder on the predicted masks. Sometimes we might want
+ # to skip the memory encoder with `run_mem_encoder=False`. For example,
+ # in demo we might call `track_step` multiple times for each user click,
+ # and only encode the memory when the user finalizes their clicks. And in ablation
+ # settings like SAM training on static images, we don't need the memory encoder.
+ run_mem_encoder=True,
+ # The previously predicted SAM mask logits (which can be fed together with new clicks in demo).
+ prev_sam_mask_logits=None,
+ ):
+ current_out, sam_outputs, _, _ = self._track_step(
+ frame_idx,
+ is_init_cond_frame,
+ current_vision_feats,
+ current_vision_pos_embeds,
+ feat_sizes,
+ point_inputs,
+ mask_inputs,
+ output_dict,
+ num_frames,
+ track_in_reverse,
+ prev_sam_mask_logits,
+ )
+
+ (
+ _,
+ _,
+ _,
+ low_res_masks,
+ high_res_masks,
+ obj_ptr,
+ object_score_logits,
+ ) = sam_outputs
+
+ current_out["pred_masks"] = low_res_masks
+ current_out["pred_masks_high_res"] = high_res_masks
+ current_out["obj_ptr"] = obj_ptr
+ if not self.training:
+ # Only add this in inference (to avoid unused param in activation checkpointing;
+ # it's mainly used in the demo to encode spatial memories w/ consolidated masks)
+ current_out["object_score_logits"] = object_score_logits
+
+ # Finally run the memory encoder on the predicted mask to encode
+ # it into a new memory feature (that can be used in future frames)
+ self._encode_memory_in_output(
+ current_vision_feats,
+ feat_sizes,
+ point_inputs,
+ run_mem_encoder,
+ high_res_masks,
+ object_score_logits,
+ current_out,
+ )
+
+ return current_out
+
+ def _use_multimask(self, is_init_cond_frame, point_inputs):
+ """Whether to use multimask output in the SAM head."""
+ num_pts = 0 if point_inputs is None else point_inputs["point_labels"].size(1)
+ multimask_output = (
+ self.multimask_output_in_sam
+ and (is_init_cond_frame or self.multimask_output_for_tracking)
+ and (self.multimask_min_pt_num <= num_pts <= self.multimask_max_pt_num)
+ )
+ return multimask_output
+
+ def _apply_non_overlapping_constraints(self, pred_masks):
+ """
+ Apply non-overlapping constraints to the object scores in pred_masks. Here we
+ keep only the highest scoring object at each spatial location in pred_masks.
+ """
+ batch_size = pred_masks.size(0)
+ if batch_size == 1:
+ return pred_masks
+
+ device = pred_masks.device
+ # "max_obj_inds": object index of the object with the highest score at each location
+ max_obj_inds = torch.argmax(pred_masks, dim=0, keepdim=True)
+ # "batch_obj_inds": object index of each object slice (along dim 0) in `pred_masks`
+ batch_obj_inds = torch.arange(batch_size, device=device)[:, None, None, None]
+ keep = max_obj_inds == batch_obj_inds
+ # suppress overlapping regions' scores below -10.0 so that the foreground regions
+ # don't overlap (here sigmoid(-10.0)=4.5398e-05)
+ pred_masks = torch.where(keep, pred_masks, torch.clamp(pred_masks, max=-10.0))
+ return pred_masks
diff --git a/projects/PCSegSAM2/sam2/modeling/sam2_utils.py b/projects/PCSegSAM2/sam2/modeling/sam2_utils.py
new file mode 100644
index 00000000..890af8b0
--- /dev/null
+++ b/projects/PCSegSAM2/sam2/modeling/sam2_utils.py
@@ -0,0 +1,314 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+
+import copy
+from typing import Tuple
+
+import numpy as np
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from sam2.utils.misc import mask_to_box
+
+
+def select_closest_cond_frames(frame_idx, cond_frame_outputs, max_cond_frame_num):
+ """
+ Select up to `max_cond_frame_num` conditioning frames from `cond_frame_outputs`
+ that are temporally closest to the current frame at `frame_idx`. Here, we take
+ - a) the closest conditioning frame before `frame_idx` (if any);
+ - b) the closest conditioning frame after `frame_idx` (if any);
+ - c) any other temporally closest conditioning frames until reaching a total
+ of `max_cond_frame_num` conditioning frames.
+
+ Outputs:
+ - selected_outputs: selected items (keys & values) from `cond_frame_outputs`.
+ - unselected_outputs: items (keys & values) not selected in `cond_frame_outputs`.
+ """
+ if max_cond_frame_num == -1 or len(cond_frame_outputs) <= max_cond_frame_num:
+ selected_outputs = cond_frame_outputs
+ unselected_outputs = {}
+ else:
+ assert max_cond_frame_num >= 2, "we should allow using 2+ conditioning frames"
+ selected_outputs = {}
+
+ # the closest conditioning frame before `frame_idx` (if any)
+ idx_before = max((t for t in cond_frame_outputs if t < frame_idx), default=None)
+ if idx_before is not None:
+ selected_outputs[idx_before] = cond_frame_outputs[idx_before]
+
+ # the closest conditioning frame after `frame_idx` (if any)
+ idx_after = min((t for t in cond_frame_outputs if t >= frame_idx), default=None)
+ if idx_after is not None:
+ selected_outputs[idx_after] = cond_frame_outputs[idx_after]
+
+ # add other temporally closest conditioning frames until reaching a total
+ # of `max_cond_frame_num` conditioning frames.
+ num_remain = max_cond_frame_num - len(selected_outputs)
+ inds_remain = sorted(
+ (t for t in cond_frame_outputs if t not in selected_outputs),
+ key=lambda x: abs(x - frame_idx),
+ )[:num_remain]
+ selected_outputs.update((t, cond_frame_outputs[t]) for t in inds_remain)
+ unselected_outputs = {t: v for t, v in cond_frame_outputs.items() if t not in selected_outputs}
+
+ return selected_outputs, unselected_outputs
+
+
+def get_1d_sine_pe(pos_inds, dim, temperature=10000):
+ """
+ Get 1D sine positional embedding as in the original Transformer paper.
+ """
+ pe_dim = dim // 2
+ dim_t = torch.arange(pe_dim, dtype=torch.float32, device=pos_inds.device)
+ dim_t = temperature ** (2 * (dim_t // 2) / pe_dim)
+
+ pos_embed = pos_inds.unsqueeze(-1) / dim_t
+ pos_embed = torch.cat([pos_embed.sin(), pos_embed.cos()], dim=-1)
+ return pos_embed
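+
+# Worked example (assumed values): for dim=8 and temperature=10000, dim_t is
+# [1, 1, 100, 100], so a position p maps to
+# [sin(p), sin(p), sin(p/100), sin(p/100), cos(p), cos(p), cos(p/100), cos(p/100)]:
+# sines in the first dim/2 channels and cosines in the rest.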
+
+
+def get_activation_fn(activation):
+ """Return an activation function given a string"""
+ if activation == "relu":
+ return F.relu
+ if activation == "gelu":
+ return F.gelu
+ if activation == "glu":
+ return F.glu
+    raise RuntimeError(f"activation should be relu/gelu/glu, not {activation}.")
+
+
+def get_clones(module, N):
+ return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
+
+
+class DropPath(nn.Module):
+ # adapted from https://github.com/huggingface/pytorch-image-models/blob/main/timm/layers/drop.py
+ def __init__(self, drop_prob=0.0, scale_by_keep=True):
+ super(DropPath, self).__init__()
+ self.drop_prob = drop_prob
+ self.scale_by_keep = scale_by_keep
+
+ def forward(self, x):
+ if self.drop_prob == 0.0 or not self.training:
+ return x
+ keep_prob = 1 - self.drop_prob
+ shape = (x.shape[0],) + (1,) * (x.ndim - 1)
+ random_tensor = x.new_empty(shape).bernoulli_(keep_prob)
+ if keep_prob > 0.0 and self.scale_by_keep:
+ random_tensor.div_(keep_prob)
+ return x * random_tensor
+
+
+# Lightly adapted from
+# https://github.com/facebookresearch/MaskFormer/blob/main/mask_former/modeling/transformer/transformer_predictor.py # noqa
+class MLP(nn.Module):
+ def __init__(
+ self,
+ input_dim: int,
+ hidden_dim: int,
+ output_dim: int,
+ num_layers: int,
+ activation: nn.Module = nn.ReLU,
+ sigmoid_output: bool = False,
+ ) -> None:
+ super().__init__()
+ self.num_layers = num_layers
+ h = [hidden_dim] * (num_layers - 1)
+ self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))
+ self.sigmoid_output = sigmoid_output
+ self.act = activation()
+
+ def forward(self, x):
+ for i, layer in enumerate(self.layers):
+ x = self.act(layer(x)) if i < self.num_layers - 1 else layer(x)
+ if self.sigmoid_output:
+ x = F.sigmoid(x)
+ return x
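+
+# Example (assumed values): MLP(256, 2048, 256, num_layers=2) builds
+# Linear(256, 2048) -> ReLU -> Linear(2048, 256); num_layers=3 would insert an extra
+# Linear(2048, 2048) hidden layer before the output projection.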
+
+
+# From https://github.com/facebookresearch/detectron2/blob/main/detectron2/layers/batch_norm.py # noqa
+# Itself from https://github.com/facebookresearch/ConvNeXt/blob/d1fa8f6fef0a165b27399986cc2bdacc92777e40/models/convnext.py#L119 # noqa
+class LayerNorm2d(nn.Module):
+ def __init__(self, num_channels: int, eps: float = 1e-6) -> None:
+ super().__init__()
+ self.weight = nn.Parameter(torch.ones(num_channels))
+ self.bias = nn.Parameter(torch.zeros(num_channels))
+ self.eps = eps
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ u = x.mean(1, keepdim=True)
+ s = (x - u).pow(2).mean(1, keepdim=True)
+ x = (x - u) / torch.sqrt(s + self.eps)
+ x = self.weight[:, None, None] * x + self.bias[:, None, None]
+ return x
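+
+# Note: unlike nn.LayerNorm on the last dim, this normalizes over the channel dim
+# only, i.e. each spatial location's C-vector in an NCHW tensor is standardized
+# independently and then scaled/shifted by per-channel affine parameters.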
+
+
+def sample_box_points(
+ masks: torch.Tensor,
+ noise: float = 0.1, # SAM default
+ noise_bound: int = 20, # SAM default
+ top_left_label: int = 2,
+ bottom_right_label: int = 3,
+) -> Tuple[torch.Tensor, torch.Tensor]:
+ """
+ Sample a noised version of the top left and bottom right corners of a given `bbox`
+
+ Inputs:
+    - masks: [B, 1, H, W] masks, dtype=torch.Tensor
+    - noise: noise as a fraction of box width and height, dtype=float
+    - noise_bound: maximum amount of noise (in pixels), dtype=int
+
+ Returns:
+ - box_coords: [B, num_pt, 2], contains (x, y) coordinates of top left and bottom right box corners, dtype=torch.float
+    - box_labels: [B, num_pt], label 2 is reserved for top-left and 3 for bottom-right corners, dtype=torch.int32
+ """
+ device = masks.device
+ box_coords = mask_to_box(masks)
+ B, _, H, W = masks.shape
+ box_labels = torch.tensor([top_left_label, bottom_right_label], dtype=torch.int, device=device).repeat(B)
+ if noise > 0.0:
+ if not isinstance(noise_bound, torch.Tensor):
+ noise_bound = torch.tensor(noise_bound, device=device)
+ bbox_w = box_coords[..., 2] - box_coords[..., 0]
+ bbox_h = box_coords[..., 3] - box_coords[..., 1]
+ max_dx = torch.min(bbox_w * noise, noise_bound)
+ max_dy = torch.min(bbox_h * noise, noise_bound)
+ box_noise = 2 * torch.rand(B, 1, 4, device=device) - 1
+ box_noise = box_noise * torch.stack((max_dx, max_dy, max_dx, max_dy), dim=-1)
+
+ box_coords = box_coords + box_noise
+ img_bounds = torch.tensor([W, H, W, H], device=device) - 1 # uncentered pixel coords
+ box_coords.clamp_(torch.zeros_like(img_bounds), img_bounds) # In place clamping
+
+ box_coords = box_coords.reshape(-1, 2, 2) # always 2 points
+ box_labels = box_labels.reshape(-1, 2)
+ return box_coords, box_labels
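+
+# Usage sketch (illustrative; shapes follow the docstring above):
+#
+#   gt = torch.zeros(2, 1, 512, 512, dtype=torch.bool)
+#   gt[:, :, 100:200, 150:300] = True
+#   box_coords, box_labels = sample_box_points(gt, noise=0.1, noise_bound=20)
+#   # box_coords: [2, 2, 2] jittered (top-left, bottom-right) corners per mask
+#   # box_labels: [2, 2] filled with (2, 3)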
+
+
+def sample_random_points_from_errors(gt_masks, pred_masks, num_pt=1):
+ """
+ Sample `num_pt` random points (along with their labels) independently from the error regions.
+
+ Inputs:
+ - gt_masks: [B, 1, H_im, W_im] masks, dtype=torch.bool
+ - pred_masks: [B, 1, H_im, W_im] masks, dtype=torch.bool or None
+ - num_pt: int, number of points to sample independently for each of the B error maps
+
+ Outputs:
+ - points: [B, num_pt, 2], dtype=torch.float, contains (x, y) coordinates of each sampled point
+ - labels: [B, num_pt], dtype=torch.int32, where 1 means positive clicks and 0 means
+ negative clicks
+ """
+ if pred_masks is None: # if pred_masks is not provided, treat it as empty
+ pred_masks = torch.zeros_like(gt_masks)
+ assert gt_masks.dtype == torch.bool and gt_masks.size(1) == 1
+ assert pred_masks.dtype == torch.bool and pred_masks.shape == gt_masks.shape
+ assert num_pt >= 0
+
+ B, _, H_im, W_im = gt_masks.shape
+ device = gt_masks.device
+
+ # false positive region, a new point sampled in this region should have
+ # negative label to correct the FP error
+ fp_masks = ~gt_masks & pred_masks
+ # false negative region, a new point sampled in this region should have
+ # positive label to correct the FN error
+ fn_masks = gt_masks & ~pred_masks
+    # whether the prediction completely matches the ground-truth on each mask
+ all_correct = torch.all((gt_masks == pred_masks).flatten(2), dim=2)
+ all_correct = all_correct[..., None, None]
+
+ # channel 0 is FP map, while channel 1 is FN map
+ pts_noise = torch.rand(B, num_pt, H_im, W_im, 2, device=device)
+    # sample a negative new click from the FP region or a positive new click
+    # from the FN region, depending on where the maximum falls;
+    # in case the predictions are all correct (no FP or FN), we just
+    # sample a negative click from the background region
+ pts_noise[..., 0] *= fp_masks | (all_correct & ~gt_masks)
+ pts_noise[..., 1] *= fn_masks
+ pts_idx = pts_noise.flatten(2).argmax(dim=2)
+ labels = (pts_idx % 2).to(torch.int32)
+ pts_idx = pts_idx // 2
+ pts_x = pts_idx % W_im
+ pts_y = pts_idx // W_im
+ points = torch.stack([pts_x, pts_y], dim=2).to(torch.float)
+ return points, labels
+
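+# Usage sketch (illustrative): given boolean gt_masks and pred_masks of shape
+# [B, 1, H, W], sample two corrective clicks per mask:
+#   pts, lbls = sample_random_points_from_errors(gt_masks, pred_masks, num_pt=2)
+#   # lbls == 1 for clicks in false-negative regions, 0 for false-positive ones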
+
+def sample_one_point_from_error_center(gt_masks, pred_masks, padding=True):
+ """
+ Sample 1 random point (along with its label) from the center of each error region,
+ that is, the point with the largest distance to the boundary of each error region.
+ This is the RITM sampling method from https://github.com/saic-vul/ritm_interactive_segmentation/blob/master/isegm/inference/clicker.py
+
+ Inputs:
+ - gt_masks: [B, 1, H_im, W_im] masks, dtype=torch.bool
+ - pred_masks: [B, 1, H_im, W_im] masks, dtype=torch.bool or None
+ - padding: if True, pad with boundary of 1 px for distance transform
+
+ Outputs:
+ - points: [B, 1, 2], dtype=torch.float, contains (x, y) coordinates of each sampled point
+ - labels: [B, 1], dtype=torch.int32, where 1 means positive clicks and 0 means negative clicks
+ """
+ import cv2
+
+ if pred_masks is None:
+ pred_masks = torch.zeros_like(gt_masks)
+ assert gt_masks.dtype == torch.bool and gt_masks.size(1) == 1
+ assert pred_masks.dtype == torch.bool and pred_masks.shape == gt_masks.shape
+
+ B, _, _, W_im = gt_masks.shape
+ device = gt_masks.device
+
+ # false positive region, a new point sampled in this region should have
+ # negative label to correct the FP error
+ fp_masks = ~gt_masks & pred_masks
+ # false negative region, a new point sampled in this region should have
+ # positive label to correct the FN error
+ fn_masks = gt_masks & ~pred_masks
+
+ fp_masks = fp_masks.cpu().numpy()
+ fn_masks = fn_masks.cpu().numpy()
+ points = torch.zeros(B, 1, 2, dtype=torch.float)
+ labels = torch.ones(B, 1, dtype=torch.int32)
+ for b in range(B):
+ fn_mask = fn_masks[b, 0]
+ fp_mask = fp_masks[b, 0]
+ if padding:
+ fn_mask = np.pad(fn_mask, ((1, 1), (1, 1)), "constant")
+ fp_mask = np.pad(fp_mask, ((1, 1), (1, 1)), "constant")
+ # compute the distance of each point in FN/FP region to its boundary
+ fn_mask_dt = cv2.distanceTransform(fn_mask.astype(np.uint8), cv2.DIST_L2, 0)
+ fp_mask_dt = cv2.distanceTransform(fp_mask.astype(np.uint8), cv2.DIST_L2, 0)
+ if padding:
+ fn_mask_dt = fn_mask_dt[1:-1, 1:-1]
+ fp_mask_dt = fp_mask_dt[1:-1, 1:-1]
+
+ # take the point in FN/FP region with the largest distance to its boundary
+ fn_mask_dt_flat = fn_mask_dt.reshape(-1)
+ fp_mask_dt_flat = fp_mask_dt.reshape(-1)
+ fn_argmax = np.argmax(fn_mask_dt_flat)
+ fp_argmax = np.argmax(fp_mask_dt_flat)
+ is_positive = fn_mask_dt_flat[fn_argmax] > fp_mask_dt_flat[fp_argmax]
+ pt_idx = fn_argmax if is_positive else fp_argmax
+ points[b, 0, 0] = pt_idx % W_im # x
+ points[b, 0, 1] = pt_idx // W_im # y
+ labels[b, 0] = int(is_positive)
+
+ points = points.to(device)
+ labels = labels.to(device)
+ return points, labels
+
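+# Usage sketch (illustrative): RITM-style center sampling picks the point
+# farthest from the boundary of the larger error region, which tends to yield
+# a more informative click than uniform sampling:
+#   pts, lbls = sample_one_point_from_error_center(gt_masks, pred_masks)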
+
+def get_next_point(gt_masks, pred_masks, method):
+ if method == "uniform":
+ return sample_random_points_from_errors(gt_masks, pred_masks)
+ elif method == "center":
+ return sample_one_point_from_error_center(gt_masks, pred_masks)
+ else:
+ raise ValueError(f"unknown sampling method {method}")
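+
+
+# Usage sketch (illustrative): in an interactive training loop, one corrective
+# click per iteration can be drawn with either strategy:
+#   points, labels = get_next_point(gt_masks, pred_masks, method="uniform")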
diff --git a/projects/PCSegSAM2/sam2/sam2_hiera_b+.yaml b/projects/PCSegSAM2/sam2/sam2_hiera_b+.yaml
new file mode 120000
index 00000000..998d9c98
--- /dev/null
+++ b/projects/PCSegSAM2/sam2/sam2_hiera_b+.yaml
@@ -0,0 +1 @@
+configs/sam2/sam2_hiera_b+.yaml
\ No newline at end of file
diff --git a/projects/PCSegSAM2/sam2/sam2_hiera_l.yaml b/projects/PCSegSAM2/sam2/sam2_hiera_l.yaml
new file mode 120000
index 00000000..c0e7e58e
--- /dev/null
+++ b/projects/PCSegSAM2/sam2/sam2_hiera_l.yaml
@@ -0,0 +1 @@
+configs/sam2/sam2_hiera_l.yaml
\ No newline at end of file
diff --git a/projects/PCSegSAM2/sam2/sam2_hiera_s.yaml b/projects/PCSegSAM2/sam2/sam2_hiera_s.yaml
new file mode 120000
index 00000000..41896a26
--- /dev/null
+++ b/projects/PCSegSAM2/sam2/sam2_hiera_s.yaml
@@ -0,0 +1 @@
+configs/sam2/sam2_hiera_s.yaml
\ No newline at end of file
diff --git a/projects/PCSegSAM2/sam2/sam2_hiera_t.yaml b/projects/PCSegSAM2/sam2/sam2_hiera_t.yaml
new file mode 120000
index 00000000..71ff3abb
--- /dev/null
+++ b/projects/PCSegSAM2/sam2/sam2_hiera_t.yaml
@@ -0,0 +1 @@
+configs/sam2/sam2_hiera_t.yaml
\ No newline at end of file
diff --git a/projects/PCSegSAM2/sam2/sam2_image_predictor.py b/projects/PCSegSAM2/sam2/sam2_image_predictor.py
new file mode 100644
index 00000000..2362ce42
--- /dev/null
+++ b/projects/PCSegSAM2/sam2/sam2_image_predictor.py
@@ -0,0 +1,430 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+import logging
+from typing import List, Optional, Tuple, Union
+
+import numpy as np
+import torch
+from PIL.Image import Image
+from sam2.modeling.sam2_base import SAM2Base
+from sam2.utils.transforms import SAM2Transforms
+
+
+class SAM2ImagePredictor:
+ def __init__(
+ self,
+ sam_model: SAM2Base,
+ mask_threshold=0.0,
+ max_hole_area=0.0,
+ max_sprinkle_area=0.0,
+ **kwargs,
+ ) -> None:
+        """
+        Uses SAM-2 to calculate the image embedding for an image, and then
+        allows repeated, efficient mask prediction given prompts.
+
+        Arguments:
+          sam_model (SAM2Base): The model to use for mask prediction.
+          mask_threshold (float): The threshold to use when converting mask logits
+            to binary masks. Masks are thresholded at 0 by default.
+          max_hole_area (float): If max_hole_area > 0, we fill small holes of area
+            up to max_hole_area in low_res_masks.
+          max_sprinkle_area (float): If max_sprinkle_area > 0, we remove small sprinkles
+            of area up to max_sprinkle_area in low_res_masks.
+        """
+ super().__init__()
+ self.model = sam_model
+ self._transforms = SAM2Transforms(
+ resolution=self.model.image_size,
+ mask_threshold=mask_threshold,
+ max_hole_area=max_hole_area,
+ max_sprinkle_area=max_sprinkle_area,
+ )
+
+ # Predictor state
+ self._is_image_set = False
+ self._features = None
+ self._orig_hw = None
+ # Whether the predictor is set for single image or a batch of images
+ self._is_batch = False
+
+ # Predictor config
+ self.mask_threshold = mask_threshold
+
+ # Spatial dim for backbone feature maps
+ self._bb_feat_sizes = [
+ (256, 256),
+ (128, 128),
+ (64, 64),
+ ]
+
+ @classmethod
+ def from_pretrained(cls, model_id: str, **kwargs) -> "SAM2ImagePredictor":
+ """
+ Load a pretrained model from the Hugging Face hub.
+
+ Arguments:
+ model_id (str): The Hugging Face repository ID.
+ **kwargs: Additional arguments to pass to the model constructor.
+
+ Returns:
+ (SAM2ImagePredictor): The loaded model.
+ """
+ from sam2.build_sam import build_sam2_hf
+
+ sam_model = build_sam2_hf(model_id, **kwargs)
+ return cls(sam_model, **kwargs)
+
+ @torch.no_grad()
+ def set_image(
+ self,
+ image: Union[np.ndarray, Image],
+ ) -> None:
+ """
+ Calculates the image embeddings for the provided image, allowing
+ masks to be predicted with the 'predict' method.
+
+        Arguments:
+          image (np.ndarray or PIL Image): The input image to embed, in RGB format
+            with pixel values in [0, 255]. The image should be in HWC format if
+            np.ndarray, or WHC format if PIL Image.
+        """
+ self.reset_predictor()
+ # Transform the image to the form expected by the model
+ if isinstance(image, np.ndarray):
+ logging.info("For numpy array image, we assume (HxWxC) format")
+ self._orig_hw = [image.shape[:2]]
+ elif isinstance(image, Image):
+ w, h = image.size
+ self._orig_hw = [(h, w)]
+ else:
+ raise NotImplementedError("Image format not supported")
+
+ input_image = self._transforms(image)
+ input_image = input_image[None, ...].to(self.device)
+
+ assert (
+ len(input_image.shape) == 4 and input_image.shape[1] == 3
+ ), f"input_image must be of size 1x3xHxW, got {input_image.shape}"
+ logging.info("Computing image embeddings for the provided image...")
+ backbone_out = self.model.forward_image(input_image)
+ _, vision_feats, _, _ = self.model._prepare_backbone_features(backbone_out)
+        # Add no_mem_embed, which is added to the lowest res feat. map during training on videos
+ if self.model.directly_add_no_mem_embed:
+ vision_feats[-1] = vision_feats[-1] + self.model.no_mem_embed
+
+ feats = [
+ feat.permute(1, 2, 0).view(1, -1, *feat_size)
+ for feat, feat_size in zip(vision_feats[::-1], self._bb_feat_sizes[::-1])
+ ][::-1]
+ self._features = {"image_embed": feats[-1], "high_res_feats": feats[:-1]}
+ self._is_image_set = True
+ logging.info("Image embeddings computed.")
+
+ @torch.no_grad()
+ def set_image_batch(
+ self,
+        image_list: List[np.ndarray],
+ ) -> None:
+ """
+ Calculates the image embeddings for the provided image batch, allowing
+ masks to be predicted with the 'predict_batch' method.
+
+ Arguments:
+          image_list (List[np.ndarray]): The input images to embed in RGB format.
+            Each image should be an HWC np.ndarray with pixel values in [0, 255].
+ """
+ self.reset_predictor()
+ assert isinstance(image_list, list)
+ self._orig_hw = []
+ for image in image_list:
+ assert isinstance(
+ image, np.ndarray
+ ), "Images are expected to be an np.ndarray in RGB format, and of shape HWC"
+ self._orig_hw.append(image.shape[:2])
+ # Transform the image to the form expected by the model
+ img_batch = self._transforms.forward_batch(image_list)
+ img_batch = img_batch.to(self.device)
+ batch_size = img_batch.shape[0]
+ assert (
+ len(img_batch.shape) == 4 and img_batch.shape[1] == 3
+ ), f"img_batch must be of size Bx3xHxW, got {img_batch.shape}"
+ logging.info("Computing image embeddings for the provided images...")
+ backbone_out = self.model.forward_image(img_batch)
+ _, vision_feats, _, _ = self.model._prepare_backbone_features(backbone_out)
+        # Add no_mem_embed, which is added to the lowest res feat. map during training on videos
+ if self.model.directly_add_no_mem_embed:
+ vision_feats[-1] = vision_feats[-1] + self.model.no_mem_embed
+
+ feats = [
+ feat.permute(1, 2, 0).view(batch_size, -1, *feat_size)
+ for feat, feat_size in zip(vision_feats[::-1], self._bb_feat_sizes[::-1])
+ ][::-1]
+ self._features = {"image_embed": feats[-1], "high_res_feats": feats[:-1]}
+ self._is_image_set = True
+ self._is_batch = True
+ logging.info("Image embeddings computed.")
+
+ def predict_batch(
+ self,
+ point_coords_batch: List[np.ndarray] = None,
+ point_labels_batch: List[np.ndarray] = None,
+ box_batch: List[np.ndarray] = None,
+ mask_input_batch: List[np.ndarray] = None,
+ multimask_output: bool = True,
+ return_logits: bool = False,
+ normalize_coords=True,
+ ) -> Tuple[List[np.ndarray], List[np.ndarray], List[np.ndarray]]:
+        """This function is very similar to predict(...); however, it is used in batched
+        mode, when the model is expected to generate predictions on multiple images.
+        It returns a tuple of lists of masks, ious, and low_res_masks_logits.
+ """
+ assert self._is_batch, "This function should only be used when in batched mode"
+ if not self._is_image_set:
+ raise RuntimeError("An image must be set with .set_image_batch(...) before mask prediction.")
+ num_images = len(self._features["image_embed"])
+ all_masks = []
+ all_ious = []
+ all_low_res_masks = []
+ for img_idx in range(num_images):
+ # Transform input prompts
+ point_coords = point_coords_batch[img_idx] if point_coords_batch is not None else None
+ point_labels = point_labels_batch[img_idx] if point_labels_batch is not None else None
+ box = box_batch[img_idx] if box_batch is not None else None
+ mask_input = mask_input_batch[img_idx] if mask_input_batch is not None else None
+ mask_input, unnorm_coords, labels, unnorm_box = self._prep_prompts(
+ point_coords,
+ point_labels,
+ box,
+ mask_input,
+ normalize_coords,
+ img_idx=img_idx,
+ )
+ masks, iou_predictions, low_res_masks = self._predict(
+ unnorm_coords,
+ labels,
+ unnorm_box,
+ mask_input,
+ multimask_output,
+ return_logits=return_logits,
+ img_idx=img_idx,
+ )
+ masks_np = masks.squeeze(0).float().detach().cpu().numpy()
+ iou_predictions_np = iou_predictions.squeeze(0).float().detach().cpu().numpy()
+ low_res_masks_np = low_res_masks.squeeze(0).float().detach().cpu().numpy()
+ all_masks.append(masks_np)
+ all_ious.append(iou_predictions_np)
+ all_low_res_masks.append(low_res_masks_np)
+
+ return all_masks, all_ious, all_low_res_masks
+
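+    # Usage sketch (illustrative): after set_image_batch(...), one prompt per
+    # image can be passed, e.g. a single box for each of two images:
+    #   predictor.set_image_batch([img0, img1])
+    #   masks, ious, low_res = predictor.predict_batch(
+    #       box_batch=[np.array([10, 20, 200, 220]), np.array([50, 60, 300, 330])])
+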
+ def predict(
+ self,
+ point_coords: Optional[np.ndarray] = None,
+ point_labels: Optional[np.ndarray] = None,
+ box: Optional[np.ndarray] = None,
+ mask_input: Optional[np.ndarray] = None,
+ multimask_output: bool = True,
+ return_logits: bool = False,
+ normalize_coords=True,
+ ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
+ """
+ Predict masks for the given input prompts, using the currently set image.
+
+ Arguments:
+ point_coords (np.ndarray or None): A Nx2 array of point prompts to the
+ model. Each point is in (X,Y) in pixels.
+ point_labels (np.ndarray or None): A length N array of labels for the
+ point prompts. 1 indicates a foreground point and 0 indicates a
+ background point.
+          box (np.ndarray or None): A length 4 array giving a box prompt to the
+            model, in XYXY format.
+ mask_input (np.ndarray): A low resolution mask input to the model, typically
+ coming from a previous prediction iteration. Has form 1xHxW, where
+ for SAM, H=W=256.
+ multimask_output (bool): If true, the model will return three masks.
+ For ambiguous input prompts (such as a single click), this will often
+ produce better masks than a single prediction. If only a single
+ mask is needed, the model's predicted quality score can be used
+ to select the best mask. For non-ambiguous prompts, such as multiple
+ input prompts, multimask_output=False can give better results.
+ return_logits (bool): If true, returns un-thresholded masks logits
+ instead of a binary mask.
+          normalize_coords (bool): If true, the point coordinates will be normalized to the range [0,1]; point_coords is then expected to be given w.r.t. the image dimensions.
+
+ Returns:
+ (np.ndarray): The output masks in CxHxW format, where C is the
+ number of masks, and (H, W) is the original image size.
+ (np.ndarray): An array of length C containing the model's
+ predictions for the quality of each mask.
+ (np.ndarray): An array of shape CxHxW, where C is the number
+ of masks and H=W=256. These low resolution logits can be passed to
+ a subsequent iteration as mask input.
+ """
+ if not self._is_image_set:
+ raise RuntimeError("An image must be set with .set_image(...) before mask prediction.")
+
+ # Transform input prompts
+
+ mask_input, unnorm_coords, labels, unnorm_box = self._prep_prompts(
+ point_coords, point_labels, box, mask_input, normalize_coords
+ )
+
+ masks, iou_predictions, low_res_masks = self._predict(
+ unnorm_coords,
+ labels,
+ unnorm_box,
+ mask_input,
+ multimask_output,
+ return_logits=return_logits,
+ )
+
+ masks_np = masks.squeeze(0).float().detach().cpu().numpy()
+ iou_predictions_np = iou_predictions.squeeze(0).float().detach().cpu().numpy()
+ low_res_masks_np = low_res_masks.squeeze(0).float().detach().cpu().numpy()
+ return masks_np, iou_predictions_np, low_res_masks_np
+
+ def _prep_prompts(self, point_coords, point_labels, box, mask_logits, normalize_coords, img_idx=-1):
+
+ unnorm_coords, labels, unnorm_box, mask_input = None, None, None, None
+ if point_coords is not None:
+ assert point_labels is not None, "point_labels must be supplied if point_coords is supplied."
+ point_coords = torch.as_tensor(point_coords, dtype=torch.float, device=self.device)
+ unnorm_coords = self._transforms.transform_coords(
+ point_coords, normalize=normalize_coords, orig_hw=self._orig_hw[img_idx]
+ )
+ labels = torch.as_tensor(point_labels, dtype=torch.int, device=self.device)
+ if len(unnorm_coords.shape) == 2:
+ unnorm_coords, labels = unnorm_coords[None, ...], labels[None, ...]
+ if box is not None:
+ box = torch.as_tensor(box, dtype=torch.float, device=self.device)
+ unnorm_box = self._transforms.transform_boxes(
+ box, normalize=normalize_coords, orig_hw=self._orig_hw[img_idx]
+ ) # Bx2x2
+ if mask_logits is not None:
+ mask_input = torch.as_tensor(mask_logits, dtype=torch.float, device=self.device)
+ if len(mask_input.shape) == 3:
+ mask_input = mask_input[None, :, :, :]
+ return mask_input, unnorm_coords, labels, unnorm_box
+
+ @torch.no_grad()
+ def _predict(
+ self,
+ point_coords: Optional[torch.Tensor],
+ point_labels: Optional[torch.Tensor],
+ boxes: Optional[torch.Tensor] = None,
+ mask_input: Optional[torch.Tensor] = None,
+ multimask_output: bool = True,
+ return_logits: bool = False,
+ img_idx: int = -1,
+ ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+ """
+ Predict masks for the given input prompts, using the currently set image.
+ Input prompts are batched torch tensors and are expected to already be
+ transformed to the input frame using SAM2Transforms.
+
+ Arguments:
+ point_coords (torch.Tensor or None): A BxNx2 array of point prompts to the
+ model. Each point is in (X,Y) in pixels.
+ point_labels (torch.Tensor or None): A BxN array of labels for the
+ point prompts. 1 indicates a foreground point and 0 indicates a
+ background point.
+          boxes (torch.Tensor or None): A Bx4 array giving a box prompt to the
+            model, in XYXY format.
+          mask_input (torch.Tensor): A low resolution mask input to the model, typically
+ coming from a previous prediction iteration. Has form Bx1xHxW, where
+ for SAM, H=W=256. Masks returned by a previous iteration of the
+ predict method do not need further transformation.
+ multimask_output (bool): If true, the model will return three masks.
+ For ambiguous input prompts (such as a single click), this will often
+ produce better masks than a single prediction. If only a single
+ mask is needed, the model's predicted quality score can be used
+ to select the best mask. For non-ambiguous prompts, such as multiple
+ input prompts, multimask_output=False can give better results.
+ return_logits (bool): If true, returns un-thresholded masks logits
+ instead of a binary mask.
+
+ Returns:
+ (torch.Tensor): The output masks in BxCxHxW format, where C is the
+ number of masks, and (H, W) is the original image size.
+ (torch.Tensor): An array of shape BxC containing the model's
+ predictions for the quality of each mask.
+ (torch.Tensor): An array of shape BxCxHxW, where C is the number
+ of masks and H=W=256. These low res logits can be passed to
+ a subsequent iteration as mask input.
+ """
+ if not self._is_image_set:
+ raise RuntimeError("An image must be set with .set_image(...) before mask prediction.")
+
+ if point_coords is not None:
+ concat_points = (point_coords, point_labels)
+ else:
+ concat_points = None
+
+ # Embed prompts
+ if boxes is not None:
+ box_coords = boxes.reshape(-1, 2, 2)
+ box_labels = torch.tensor([[2, 3]], dtype=torch.int, device=boxes.device)
+ box_labels = box_labels.repeat(boxes.size(0), 1)
+ # we merge "boxes" and "points" into a single "concat_points" input (where
+ # boxes are added at the beginning) to sam_prompt_encoder
+ if concat_points is not None:
+ concat_coords = torch.cat([box_coords, concat_points[0]], dim=1)
+ concat_labels = torch.cat([box_labels, concat_points[1]], dim=1)
+ concat_points = (concat_coords, concat_labels)
+ else:
+ concat_points = (box_coords, box_labels)
+
+ sparse_embeddings, dense_embeddings = self.model.sam_prompt_encoder(
+ points=concat_points,
+ boxes=None,
+ masks=mask_input,
+ )
+
+ # Predict masks
+ batched_mode = concat_points is not None and concat_points[0].shape[0] > 1 # multi object prediction
+ high_res_features = [feat_level[img_idx].unsqueeze(0) for feat_level in self._features["high_res_feats"]]
+ low_res_masks, iou_predictions, _, _ = self.model.sam_mask_decoder(
+ image_embeddings=self._features["image_embed"][img_idx].unsqueeze(0),
+ image_pe=self.model.sam_prompt_encoder.get_dense_pe(),
+ sparse_prompt_embeddings=sparse_embeddings,
+ dense_prompt_embeddings=dense_embeddings,
+ multimask_output=multimask_output,
+ repeat_image=batched_mode,
+ high_res_features=high_res_features,
+ )
+
+ # Upscale the masks to the original image resolution
+ masks = self._transforms.postprocess_masks(low_res_masks, self._orig_hw[img_idx])
+ low_res_masks = torch.clamp(low_res_masks, -32.0, 32.0)
+ if not return_logits:
+ masks = masks > self.mask_threshold
+
+ return masks, iou_predictions, low_res_masks
+
+ def get_image_embedding(self) -> torch.Tensor:
+ """
+ Returns the image embeddings for the currently set image, with
+ shape 1xCxHxW, where C is the embedding dimension and (H,W) are
+ the embedding spatial dimension of SAM (typically C=256, H=W=64).
+ """
+ if not self._is_image_set:
+ raise RuntimeError("An image must be set with .set_image(...) to generate an embedding.")
+ assert self._features is not None, "Features must exist if an image has been set."
+ return self._features["image_embed"]
+
+ @property
+ def device(self) -> torch.device:
+ return self.model.device
+
+ def reset_predictor(self) -> None:
+ """
+ Resets the image embeddings and other state variables.
+ """
+ self._is_image_set = False
+ self._features = None
+ self._orig_hw = None
+ self._is_batch = False
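+
+
+# Usage sketch (illustrative; `build_sam2` and the config/checkpoint names are
+# the upstream SAM 2 ones and may differ in a given setup):
+#   from sam2.build_sam import build_sam2
+#   predictor = SAM2ImagePredictor(build_sam2("sam2_hiera_l.yaml", "sam2_hiera_large.pt"))
+#   predictor.set_image(image_rgb)  # HWC uint8 np.ndarray in RGB
+#   masks, ious, low_res = predictor.predict(point_coords=np.array([[500, 375]]),
+#                                            point_labels=np.array([1]))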
diff --git a/projects/PCSegSAM2/sam2/sam2_video_predictor.py b/projects/PCSegSAM2/sam2/sam2_video_predictor.py
new file mode 100644
index 00000000..8e605be1
--- /dev/null
+++ b/projects/PCSegSAM2/sam2/sam2_video_predictor.py
@@ -0,0 +1,1123 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+import warnings
+from collections import OrderedDict
+
+import torch
+from sam2.modeling.sam2_base import NO_OBJ_SCORE, SAM2Base
+from sam2.utils.misc import concat_points, fill_holes_in_mask_scores, load_video_frames
+from tqdm import tqdm
+
+
+class SAM2VideoPredictor(SAM2Base):
+ """The predictor class to handle user interactions and manage inference states."""
+
+ def __init__(
+ self,
+ fill_hole_area=0,
+ # whether to apply non-overlapping constraints on the output object masks
+ non_overlap_masks=False,
+ # whether to clear non-conditioning memory of the surrounding frames (which may contain outdated information) after adding correction clicks;
+        # note that this only applies to *single-object tracking* unless `clear_non_cond_mem_for_multi_obj` is also set to True
+ clear_non_cond_mem_around_input=False,
+ # whether to also clear non-conditioning memory of the surrounding frames (only effective when `clear_non_cond_mem_around_input` is True).
+ clear_non_cond_mem_for_multi_obj=False,
+ # if `add_all_frames_to_correct_as_cond` is True, we also append to the conditioning frame list any frame that receives a later correction click
+        # if `add_all_frames_to_correct_as_cond` is False, we restrict the conditioning frame list to only the initial conditioning frames
+ add_all_frames_to_correct_as_cond=False,
+ **kwargs,
+ ):
+ super().__init__(**kwargs)
+ self.fill_hole_area = fill_hole_area
+ self.non_overlap_masks = non_overlap_masks
+ self.clear_non_cond_mem_around_input = clear_non_cond_mem_around_input
+ self.clear_non_cond_mem_for_multi_obj = clear_non_cond_mem_for_multi_obj
+ self.add_all_frames_to_correct_as_cond = add_all_frames_to_correct_as_cond
+
+ @torch.inference_mode()
+ def init_state(
+ self,
+ video_path,
+ offload_video_to_cpu=False,
+ offload_state_to_cpu=False,
+ async_loading_frames=False,
+ ):
+ """Initialize an inference state."""
+ compute_device = self.device # device of the model
+ images, video_height, video_width = load_video_frames(
+ video_path=video_path,
+ image_size=self.image_size,
+ offload_video_to_cpu=offload_video_to_cpu,
+ async_loading_frames=async_loading_frames,
+ compute_device=compute_device,
+ )
+ inference_state = {}
+ inference_state["images"] = images
+ inference_state["num_frames"] = len(images)
+ # whether to offload the video frames to CPU memory
+ # turning on this option saves the GPU memory with only a very small overhead
+ inference_state["offload_video_to_cpu"] = offload_video_to_cpu
+ # whether to offload the inference state to CPU memory
+ # turning on this option saves the GPU memory at the cost of a lower tracking fps
+ # (e.g. in a test case of 768x768 model, fps dropped from 27 to 24 when tracking one object
+ # and from 24 to 21 when tracking two objects)
+ inference_state["offload_state_to_cpu"] = offload_state_to_cpu
+ # the original video height and width, used for resizing final output scores
+ inference_state["video_height"] = video_height
+ inference_state["video_width"] = video_width
+ inference_state["device"] = compute_device
+ if offload_state_to_cpu:
+ inference_state["storage_device"] = torch.device("cpu")
+ else:
+ inference_state["storage_device"] = compute_device
+ # inputs on each frame
+ inference_state["point_inputs_per_obj"] = {}
+ inference_state["mask_inputs_per_obj"] = {}
+ # visual features on a small number of recently visited frames for quick interactions
+ inference_state["cached_features"] = {}
+ # values that don't change across frames (so we only need to hold one copy of them)
+ inference_state["constants"] = {}
+ # mapping between client-side object id and model-side object index
+ inference_state["obj_id_to_idx"] = OrderedDict()
+ inference_state["obj_idx_to_id"] = OrderedDict()
+ inference_state["obj_ids"] = []
+ # A storage to hold the model's tracking results and states on each frame
+ inference_state["output_dict"] = {
+            "cond_frame_outputs": {},  # dict containing {frame_idx: <out>}
+            "non_cond_frame_outputs": {},  # dict containing {frame_idx: <out>}
+ }
+ # Slice (view) of each object tracking results, sharing the same memory with "output_dict"
+ inference_state["output_dict_per_obj"] = {}
+ # A temporary storage to hold new outputs when user interact with a frame
+ # to add clicks or mask (it's merged into "output_dict" before propagation starts)
+ inference_state["temp_output_dict_per_obj"] = {}
+ # Frames that already holds consolidated outputs from click or mask inputs
+ # (we directly use their consolidated outputs during tracking)
+ inference_state["consolidated_frame_inds"] = {
+ "cond_frame_outputs": set(), # set containing frame indices
+ "non_cond_frame_outputs": set(), # set containing frame indices
+ }
+ # metadata for each tracking frame (e.g. which direction it's tracked)
+ inference_state["tracking_has_started"] = False
+ inference_state["frames_already_tracked"] = {}
+ # Warm up the visual backbone and cache the image feature on frame 0
+ self._get_image_feature(inference_state, frame_idx=0, batch_size=1)
+ return inference_state
+
+ @classmethod
+ def from_pretrained(cls, model_id: str, **kwargs) -> "SAM2VideoPredictor":
+ """
+ Load a pretrained model from the Hugging Face hub.
+
+ Arguments:
+ model_id (str): The Hugging Face repository ID.
+ **kwargs: Additional arguments to pass to the model constructor.
+
+ Returns:
+ (SAM2VideoPredictor): The loaded model.
+ """
+ from sam2.build_sam import build_sam2_video_predictor_hf
+
+ sam_model = build_sam2_video_predictor_hf(model_id, **kwargs)
+ return sam_model
+
+ def _obj_id_to_idx(self, inference_state, obj_id):
+ """Map client-side object id to model-side object index."""
+ obj_idx = inference_state["obj_id_to_idx"].get(obj_id, None)
+ if obj_idx is not None:
+ return obj_idx
+
+ # This is a new object id not sent to the server before. We only allow adding
+ # new objects *before* the tracking starts.
+ allow_new_object = not inference_state["tracking_has_started"]
+ if allow_new_object:
+ # get the next object slot
+ obj_idx = len(inference_state["obj_id_to_idx"])
+ inference_state["obj_id_to_idx"][obj_id] = obj_idx
+ inference_state["obj_idx_to_id"][obj_idx] = obj_id
+ inference_state["obj_ids"] = list(inference_state["obj_id_to_idx"])
+ # set up input and output structures for this object
+ inference_state["point_inputs_per_obj"][obj_idx] = {}
+ inference_state["mask_inputs_per_obj"][obj_idx] = {}
+ inference_state["output_dict_per_obj"][obj_idx] = {
+                "cond_frame_outputs": {},  # dict containing {frame_idx: <out>}
+                "non_cond_frame_outputs": {},  # dict containing {frame_idx: <out>}
+ }
+ inference_state["temp_output_dict_per_obj"][obj_idx] = {
+                "cond_frame_outputs": {},  # dict containing {frame_idx: <out>}
+                "non_cond_frame_outputs": {},  # dict containing {frame_idx: <out>}
+ }
+ return obj_idx
+ else:
+ raise RuntimeError(
+ f"Cannot add new object id {obj_id} after tracking starts. "
+ f"All existing object ids: {inference_state['obj_ids']}. "
+ f"Please call 'reset_state' to restart from scratch."
+ )
+
+ def _obj_idx_to_id(self, inference_state, obj_idx):
+ """Map model-side object index to client-side object id."""
+ return inference_state["obj_idx_to_id"][obj_idx]
+
+ def _get_obj_num(self, inference_state):
+ """Get the total number of unique object ids received so far in this session."""
+ return len(inference_state["obj_idx_to_id"])
+
+ @torch.inference_mode()
+ def add_new_points_or_box(
+ self,
+ inference_state,
+ frame_idx,
+ obj_id,
+ points=None,
+ labels=None,
+ clear_old_points=True,
+ normalize_coords=True,
+ box=None,
+ ):
+ """Add new points to a frame."""
+ obj_idx = self._obj_id_to_idx(inference_state, obj_id)
+ point_inputs_per_frame = inference_state["point_inputs_per_obj"][obj_idx]
+ mask_inputs_per_frame = inference_state["mask_inputs_per_obj"][obj_idx]
+
+ if (points is not None) != (labels is not None):
+ raise ValueError("points and labels must be provided together")
+ if points is None and box is None:
+ raise ValueError("at least one of points or box must be provided as input")
+
+ if points is None:
+ points = torch.zeros(0, 2, dtype=torch.float32)
+ elif not isinstance(points, torch.Tensor):
+ points = torch.tensor(points, dtype=torch.float32)
+ if labels is None:
+ labels = torch.zeros(0, dtype=torch.int32)
+ elif not isinstance(labels, torch.Tensor):
+ labels = torch.tensor(labels, dtype=torch.int32)
+ if points.dim() == 2:
+ points = points.unsqueeze(0) # add batch dimension
+ if labels.dim() == 1:
+ labels = labels.unsqueeze(0) # add batch dimension
+
+ # If `box` is provided, we add it as the first two points with labels 2 and 3
+ # along with the user-provided points (consistent with how SAM 2 is trained).
+ if box is not None:
+ if not clear_old_points:
+ raise ValueError(
+ "cannot add box without clearing old points, since "
+ "box prompt must be provided before any point prompt "
+ "(please use clear_old_points=True instead)"
+ )
+ if inference_state["tracking_has_started"]:
+ warnings.warn(
+ "You are adding a box after tracking starts. SAM 2 may not always be "
+ "able to incorporate a box prompt for *refinement*. If you intend to "
+ "use box prompt as an *initial* input before tracking, please call "
+ "'reset_state' on the inference state to restart from scratch.",
+ category=UserWarning,
+ stacklevel=2,
+ )
+ if not isinstance(box, torch.Tensor):
+ box = torch.tensor(box, dtype=torch.float32, device=points.device)
+ box_coords = box.reshape(1, 2, 2)
+ box_labels = torch.tensor([2, 3], dtype=torch.int32, device=labels.device)
+ box_labels = box_labels.reshape(1, 2)
+ points = torch.cat([box_coords, points], dim=1)
+ labels = torch.cat([box_labels, labels], dim=1)
+
+ if normalize_coords:
+ video_H = inference_state["video_height"]
+ video_W = inference_state["video_width"]
+ points = points / torch.tensor([video_W, video_H]).to(points.device)
+ # scale the (normalized) coordinates by the model's internal image size
+ points = points * self.image_size
+ points = points.to(inference_state["device"])
+ labels = labels.to(inference_state["device"])
+
+ if not clear_old_points:
+ point_inputs = point_inputs_per_frame.get(frame_idx, None)
+ else:
+ point_inputs = None
+ point_inputs = concat_points(point_inputs, points, labels)
+
+ point_inputs_per_frame[frame_idx] = point_inputs
+ mask_inputs_per_frame.pop(frame_idx, None)
+ # If this frame hasn't been tracked before, we treat it as an initial conditioning
+        # frame, meaning that the input points are used to generate segments on this frame without
+ # using any memory from other frames, like in SAM. Otherwise (if it has been tracked),
+ # the input points will be used to correct the already tracked masks.
+ is_init_cond_frame = frame_idx not in inference_state["frames_already_tracked"]
+ # whether to track in reverse time order
+ if is_init_cond_frame:
+ reverse = False
+ else:
+ reverse = inference_state["frames_already_tracked"][frame_idx]["reverse"]
+ obj_output_dict = inference_state["output_dict_per_obj"][obj_idx]
+ obj_temp_output_dict = inference_state["temp_output_dict_per_obj"][obj_idx]
+ # Add a frame to conditioning output if it's an initial conditioning frame or
+ # if the model sees all frames receiving clicks/mask as conditioning frames.
+ is_cond = is_init_cond_frame or self.add_all_frames_to_correct_as_cond
+ storage_key = "cond_frame_outputs" if is_cond else "non_cond_frame_outputs"
+
+ # Get any previously predicted mask logits on this object and feed it along with
+ # the new clicks into the SAM mask decoder.
+ prev_sam_mask_logits = None
+ # lookup temporary output dict first, which contains the most recent output
+ # (if not found, then lookup conditioning and non-conditioning frame output)
+ prev_out = obj_temp_output_dict[storage_key].get(frame_idx)
+ if prev_out is None:
+ prev_out = obj_output_dict["cond_frame_outputs"].get(frame_idx)
+ if prev_out is None:
+ prev_out = obj_output_dict["non_cond_frame_outputs"].get(frame_idx)
+
+ if prev_out is not None and prev_out["pred_masks"] is not None:
+ device = inference_state["device"]
+ prev_sam_mask_logits = prev_out["pred_masks"].to(device, non_blocking=True)
+ # Clamp the scale of prev_sam_mask_logits to avoid rare numerical issues.
+ prev_sam_mask_logits = torch.clamp(prev_sam_mask_logits, -32.0, 32.0)
+ current_out, _ = self._run_single_frame_inference(
+ inference_state=inference_state,
+ output_dict=obj_output_dict, # run on the slice of a single object
+ frame_idx=frame_idx,
+ batch_size=1, # run on the slice of a single object
+ is_init_cond_frame=is_init_cond_frame,
+ point_inputs=point_inputs,
+ mask_inputs=None,
+ reverse=reverse,
+ # Skip the memory encoder when adding clicks or mask. We execute the memory encoder
+            # at the beginning of `propagate_in_video` (after the user finalizes their clicks). This
+ # allows us to enforce non-overlapping constraints on all objects before encoding
+ # them into memory.
+ run_mem_encoder=False,
+ prev_sam_mask_logits=prev_sam_mask_logits,
+ )
+ # Add the output to the output dict (to be used as future memory)
+ obj_temp_output_dict[storage_key][frame_idx] = current_out
+
+ # Resize the output mask to the original video resolution
+ obj_ids = inference_state["obj_ids"]
+ consolidated_out = self._consolidate_temp_output_across_obj(
+ inference_state,
+ frame_idx,
+ is_cond=is_cond,
+ run_mem_encoder=False,
+ consolidate_at_video_res=True,
+ )
+ _, video_res_masks = self._get_orig_video_res_output(inference_state, consolidated_out["pred_masks_video_res"])
+ return frame_idx, obj_ids, video_res_masks
+
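+    # Usage sketch (illustrative; "video_dir" is a hypothetical frame folder):
+    #   state = predictor.init_state(video_path="video_dir")
+    #   predictor.add_new_points_or_box(state, frame_idx=0, obj_id=1,
+    #                                   points=[[210.0, 350.0]], labels=[1],
+    #                                   box=[100.0, 200.0, 400.0, 500.0])
+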
+ def add_new_points(self, *args, **kwargs):
+ """Deprecated method. Please use `add_new_points_or_box` instead."""
+ return self.add_new_points_or_box(*args, **kwargs)
+
+ @torch.inference_mode()
+ def add_new_mask(
+ self,
+ inference_state,
+ frame_idx,
+ obj_id,
+ mask,
+ ):
+ """Add new mask to a frame."""
+ obj_idx = self._obj_id_to_idx(inference_state, obj_id)
+ point_inputs_per_frame = inference_state["point_inputs_per_obj"][obj_idx]
+ mask_inputs_per_frame = inference_state["mask_inputs_per_obj"][obj_idx]
+
+ if not isinstance(mask, torch.Tensor):
+ mask = torch.tensor(mask, dtype=torch.bool)
+ assert mask.dim() == 2
+ mask_H, mask_W = mask.shape
+ mask_inputs_orig = mask[None, None] # add batch and channel dimension
+ mask_inputs_orig = mask_inputs_orig.float().to(inference_state["device"])
+
+ # resize the mask if it doesn't match the model's image size
+ if mask_H != self.image_size or mask_W != self.image_size:
+ mask_inputs = torch.nn.functional.interpolate(
+ mask_inputs_orig,
+ size=(self.image_size, self.image_size),
+ align_corners=False,
+ mode="bilinear",
+ antialias=True, # use antialias for downsampling
+ )
+ mask_inputs = (mask_inputs >= 0.5).float()
+ else:
+ mask_inputs = mask_inputs_orig
+
+ mask_inputs_per_frame[frame_idx] = mask_inputs
+ point_inputs_per_frame.pop(frame_idx, None)
+ # If this frame hasn't been tracked before, we treat it as an initial conditioning
+        # frame, meaning that the input mask is used to generate segments on this frame without
+        # using any memory from other frames, like in SAM. Otherwise (if it has been tracked),
+        # the input mask will be used to correct the already tracked masks.
+ is_init_cond_frame = frame_idx not in inference_state["frames_already_tracked"]
+ # whether to track in reverse time order
+ if is_init_cond_frame:
+ reverse = False
+ else:
+ reverse = inference_state["frames_already_tracked"][frame_idx]["reverse"]
+ obj_output_dict = inference_state["output_dict_per_obj"][obj_idx]
+ obj_temp_output_dict = inference_state["temp_output_dict_per_obj"][obj_idx]
+ # Add a frame to conditioning output if it's an initial conditioning frame or
+ # if the model sees all frames receiving clicks/mask as conditioning frames.
+ is_cond = is_init_cond_frame or self.add_all_frames_to_correct_as_cond
+ storage_key = "cond_frame_outputs" if is_cond else "non_cond_frame_outputs"
+
+ current_out, _ = self._run_single_frame_inference(
+ inference_state=inference_state,
+ output_dict=obj_output_dict, # run on the slice of a single object
+ frame_idx=frame_idx,
+ batch_size=1, # run on the slice of a single object
+ is_init_cond_frame=is_init_cond_frame,
+ point_inputs=None,
+ mask_inputs=mask_inputs,
+ reverse=reverse,
+ # Skip the memory encoder when adding clicks or mask. We execute the memory encoder
+ # at the beginning of `propagate_in_video` (after user finalize their clicks). This
+ # allows us to enforce non-overlapping constraints on all objects before encoding
+ # them into memory.
+ run_mem_encoder=False,
+ )
+ # Add the output to the output dict (to be used as future memory)
+ obj_temp_output_dict[storage_key][frame_idx] = current_out
+
+ # Resize the output mask to the original video resolution
+ obj_ids = inference_state["obj_ids"]
+ consolidated_out = self._consolidate_temp_output_across_obj(
+ inference_state,
+ frame_idx,
+ is_cond=is_cond,
+ run_mem_encoder=False,
+ consolidate_at_video_res=True,
+ )
+ _, video_res_masks = self._get_orig_video_res_output(inference_state, consolidated_out["pred_masks_video_res"])
+ return frame_idx, obj_ids, video_res_masks
+
+ def _get_orig_video_res_output(self, inference_state, any_res_masks):
+ """
+ Resize the object scores to the original video resolution (video_res_masks)
+ and apply non-overlapping constraints for final output.
+ """
+ device = inference_state["device"]
+ video_H = inference_state["video_height"]
+ video_W = inference_state["video_width"]
+ any_res_masks = any_res_masks.to(device, non_blocking=True)
+ if any_res_masks.shape[-2:] == (video_H, video_W):
+ video_res_masks = any_res_masks
+ else:
+ video_res_masks = torch.nn.functional.interpolate(
+ any_res_masks,
+ size=(video_H, video_W),
+ mode="bilinear",
+ align_corners=False,
+ )
+ if self.non_overlap_masks:
+ video_res_masks = self._apply_non_overlapping_constraints(video_res_masks)
+ return any_res_masks, video_res_masks
+
+ def _consolidate_temp_output_across_obj(
+ self,
+ inference_state,
+ frame_idx,
+ is_cond,
+ run_mem_encoder,
+ consolidate_at_video_res=False,
+ ):
+ """
+ Consolidate the per-object temporary outputs in `temp_output_dict_per_obj` on
+ a frame into a single output for all objects, including
+ 1) fill any missing objects either from `output_dict_per_obj` (if they exist in
+ `output_dict_per_obj` for this frame) or leave them as placeholder values
+ (if they don't exist in `output_dict_per_obj` for this frame);
+        2) if specified, rerun the memory encoder after applying non-overlapping constraints
+ on the object scores.
+ """
+ batch_size = self._get_obj_num(inference_state)
+ storage_key = "cond_frame_outputs" if is_cond else "non_cond_frame_outputs"
+ # Optionally, we allow consolidating the temporary outputs at the original
+ # video resolution (to provide a better editing experience for mask prompts).
+ if consolidate_at_video_res:
+ assert not run_mem_encoder, "memory encoder cannot run at video resolution"
+ consolidated_H = inference_state["video_height"]
+ consolidated_W = inference_state["video_width"]
+ consolidated_mask_key = "pred_masks_video_res"
+ else:
+ consolidated_H = consolidated_W = self.image_size // 4
+ consolidated_mask_key = "pred_masks"
+
+ # Initialize `consolidated_out`. Its "maskmem_features" and "maskmem_pos_enc"
+ # will be added when rerunning the memory encoder after applying non-overlapping
+ # constraints to object scores. Its "pred_masks" are prefilled with a large
+ # negative value (NO_OBJ_SCORE) to represent missing objects.
+ consolidated_out = {
+ "maskmem_features": None,
+ "maskmem_pos_enc": None,
+ consolidated_mask_key: torch.full(
+ size=(batch_size, 1, consolidated_H, consolidated_W),
+ fill_value=NO_OBJ_SCORE,
+ dtype=torch.float32,
+ device=inference_state["storage_device"],
+ ),
+ "obj_ptr": torch.full(
+ size=(batch_size, self.hidden_dim),
+ fill_value=NO_OBJ_SCORE,
+ dtype=torch.float32,
+ device=inference_state["device"],
+ ),
+ "object_score_logits": torch.full(
+ size=(batch_size, 1),
+ # default to 10.0 for object_score_logits, i.e. assuming the object is
+ # present as sigmoid(10)=1, same as in `predict_masks` of `MaskDecoder`
+ fill_value=10.0,
+ dtype=torch.float32,
+ device=inference_state["device"],
+ ),
+ }
+ empty_mask_ptr = None
+ for obj_idx in range(batch_size):
+ obj_temp_output_dict = inference_state["temp_output_dict_per_obj"][obj_idx]
+ obj_output_dict = inference_state["output_dict_per_obj"][obj_idx]
+ out = obj_temp_output_dict[storage_key].get(frame_idx, None)
+ # If the object doesn't appear in "temp_output_dict_per_obj" on this frame,
+ # we fall back and look up its previous output in "output_dict_per_obj".
+ # We look up both "cond_frame_outputs" and "non_cond_frame_outputs" in
+ # "output_dict_per_obj" to find a previous output for this object.
+ if out is None:
+ out = obj_output_dict["cond_frame_outputs"].get(frame_idx, None)
+ if out is None:
+ out = obj_output_dict["non_cond_frame_outputs"].get(frame_idx, None)
+ # If the object doesn't appear in "output_dict_per_obj" either, we skip it
+ # and leave its mask scores to the default scores (i.e. the NO_OBJ_SCORE
+ # placeholder above) and set its object pointer to be a dummy pointer.
+ if out is None:
+ # Fill in dummy object pointers for those objects without any inputs or
+ # tracking outcomes on this frame (only do it under `run_mem_encoder=True`,
+ # i.e. when we need to build the memory for tracking).
+ if run_mem_encoder:
+ if empty_mask_ptr is None:
+ empty_mask_ptr = self._get_empty_mask_ptr(inference_state, frame_idx)
+ # fill object pointer with a dummy pointer (based on an empty mask)
+ consolidated_out["obj_ptr"][obj_idx : obj_idx + 1] = empty_mask_ptr
+ continue
+ # Add the temporary object output mask to consolidated output mask
+ obj_mask = out["pred_masks"]
+ consolidated_pred_masks = consolidated_out[consolidated_mask_key]
+ if obj_mask.shape[-2:] == consolidated_pred_masks.shape[-2:]:
+ consolidated_pred_masks[obj_idx : obj_idx + 1] = obj_mask
+ else:
+ # Resize first if temporary object mask has a different resolution
+ resized_obj_mask = torch.nn.functional.interpolate(
+ obj_mask,
+ size=consolidated_pred_masks.shape[-2:],
+ mode="bilinear",
+ align_corners=False,
+ )
+ consolidated_pred_masks[obj_idx : obj_idx + 1] = resized_obj_mask
+ consolidated_out["obj_ptr"][obj_idx : obj_idx + 1] = out["obj_ptr"]
+ consolidated_out["object_score_logits"][obj_idx : obj_idx + 1] = out["object_score_logits"]
+
+ # Optionally, apply non-overlapping constraints on the consolidated scores
+ # and rerun the memory encoder
+ if run_mem_encoder:
+ device = inference_state["device"]
+ high_res_masks = torch.nn.functional.interpolate(
+ consolidated_out["pred_masks"].to(device, non_blocking=True),
+ size=(self.image_size, self.image_size),
+ mode="bilinear",
+ align_corners=False,
+ )
+ if self.non_overlap_masks_for_mem_enc:
+ high_res_masks = self._apply_non_overlapping_constraints(high_res_masks)
+ maskmem_features, maskmem_pos_enc = self._run_memory_encoder(
+ inference_state=inference_state,
+ frame_idx=frame_idx,
+ batch_size=batch_size,
+ high_res_masks=high_res_masks,
+ object_score_logits=consolidated_out["object_score_logits"],
+ is_mask_from_pts=True, # these frames are what the user interacted with
+ )
+ consolidated_out["maskmem_features"] = maskmem_features
+ consolidated_out["maskmem_pos_enc"] = maskmem_pos_enc
+
+ return consolidated_out
+
+ def _get_empty_mask_ptr(self, inference_state, frame_idx):
+ """Get a dummy object pointer based on an empty mask on the current frame."""
+ # A dummy (empty) mask with a single object
+ batch_size = 1
+ mask_inputs = torch.zeros(
+ (batch_size, 1, self.image_size, self.image_size),
+ dtype=torch.float32,
+ device=inference_state["device"],
+ )
+
+ # Retrieve correct image features
+ (
+ _,
+ _,
+ current_vision_feats,
+ current_vision_pos_embeds,
+ feat_sizes,
+ ) = self._get_image_feature(inference_state, frame_idx, batch_size)
+
+ # Feed the empty mask and image feature above to get a dummy object pointer
+ current_out = self.track_step(
+ frame_idx=frame_idx,
+ is_init_cond_frame=True,
+ current_vision_feats=current_vision_feats,
+ current_vision_pos_embeds=current_vision_pos_embeds,
+ feat_sizes=feat_sizes,
+ point_inputs=None,
+ mask_inputs=mask_inputs,
+ output_dict={},
+ num_frames=inference_state["num_frames"],
+ track_in_reverse=False,
+ run_mem_encoder=False,
+ prev_sam_mask_logits=None,
+ )
+ return current_out["obj_ptr"]
+
+ @torch.inference_mode()
+ def propagate_in_video_preflight(self, inference_state):
+ """Prepare inference_state and consolidate temporary outputs before tracking."""
+ # Tracking has started and we don't allow adding new objects until session is reset.
+ inference_state["tracking_has_started"] = True
+ batch_size = self._get_obj_num(inference_state)
+
+ # Consolidate per-object temporary outputs in "temp_output_dict_per_obj" and
+ # add them into "output_dict".
+ temp_output_dict_per_obj = inference_state["temp_output_dict_per_obj"]
+ output_dict = inference_state["output_dict"]
+ # "consolidated_frame_inds" contains indices of those frames where consolidated
+ # temporary outputs have been added (either in this call or any previous calls
+ # to `propagate_in_video_preflight`).
+ consolidated_frame_inds = inference_state["consolidated_frame_inds"]
+ for is_cond in [False, True]:
+ # Separately consolidate conditioning and non-conditioning temp outputs
+ storage_key = "cond_frame_outputs" if is_cond else "non_cond_frame_outputs"
+ # Find all the frames that contain temporary outputs for any objects
+            # (these should be the frames that have just received clicks or mask inputs
+ # via `add_new_points_or_box` or `add_new_mask`)
+ temp_frame_inds = set()
+ for obj_temp_output_dict in temp_output_dict_per_obj.values():
+ temp_frame_inds.update(obj_temp_output_dict[storage_key].keys())
+ consolidated_frame_inds[storage_key].update(temp_frame_inds)
+ # consolidate the temporary output across all objects on this frame
+ for frame_idx in temp_frame_inds:
+ consolidated_out = self._consolidate_temp_output_across_obj(
+ inference_state, frame_idx, is_cond=is_cond, run_mem_encoder=True
+ )
+ # merge them into "output_dict" and also create per-object slices
+ output_dict[storage_key][frame_idx] = consolidated_out
+ self._add_output_per_object(inference_state, frame_idx, consolidated_out, storage_key)
+ clear_non_cond_mem = self.clear_non_cond_mem_around_input and (
+ self.clear_non_cond_mem_for_multi_obj or batch_size <= 1
+ )
+ if clear_non_cond_mem:
+ # clear non-conditioning memory of the surrounding frames
+ self._clear_non_cond_mem_around_input(inference_state, frame_idx)
+
+ # clear temporary outputs in `temp_output_dict_per_obj`
+ for obj_temp_output_dict in temp_output_dict_per_obj.values():
+ obj_temp_output_dict[storage_key].clear()
+
+ # edge case: if an output is added to "cond_frame_outputs", we remove any prior
+ # output on the same frame in "non_cond_frame_outputs"
+ for frame_idx in output_dict["cond_frame_outputs"]:
+ output_dict["non_cond_frame_outputs"].pop(frame_idx, None)
+ for obj_output_dict in inference_state["output_dict_per_obj"].values():
+ for frame_idx in obj_output_dict["cond_frame_outputs"]:
+ obj_output_dict["non_cond_frame_outputs"].pop(frame_idx, None)
+ for frame_idx in consolidated_frame_inds["cond_frame_outputs"]:
+ assert frame_idx in output_dict["cond_frame_outputs"]
+ consolidated_frame_inds["non_cond_frame_outputs"].discard(frame_idx)
+
+ # Make sure that the frame indices in "consolidated_frame_inds" are exactly those frames
+ # with either points or mask inputs (which should be true under a correct workflow).
+ all_consolidated_frame_inds = (
+ consolidated_frame_inds["cond_frame_outputs"] | consolidated_frame_inds["non_cond_frame_outputs"]
+ )
+ input_frames_inds = set()
+ for point_inputs_per_frame in inference_state["point_inputs_per_obj"].values():
+ input_frames_inds.update(point_inputs_per_frame.keys())
+ for mask_inputs_per_frame in inference_state["mask_inputs_per_obj"].values():
+ input_frames_inds.update(mask_inputs_per_frame.keys())
+ assert all_consolidated_frame_inds == input_frames_inds
+
+ @torch.inference_mode()
+ def propagate_in_video(
+ self,
+ inference_state,
+ start_frame_idx=None,
+ max_frame_num_to_track=None,
+ reverse=False,
+ ):
+ """Propagate the input points across frames to track in the entire video."""
+ self.propagate_in_video_preflight(inference_state)
+
+ output_dict = inference_state["output_dict"]
+ consolidated_frame_inds = inference_state["consolidated_frame_inds"]
+ obj_ids = inference_state["obj_ids"]
+ num_frames = inference_state["num_frames"]
+ batch_size = self._get_obj_num(inference_state)
+ if len(output_dict["cond_frame_outputs"]) == 0:
+ raise RuntimeError("No points are provided; please add points first")
+ clear_non_cond_mem = self.clear_non_cond_mem_around_input and (
+ self.clear_non_cond_mem_for_multi_obj or batch_size <= 1
+ )
+
+ # set start index, end index, and processing order
+ if start_frame_idx is None:
+ # default: start from the earliest frame with input points
+ start_frame_idx = min(output_dict["cond_frame_outputs"])
+ if max_frame_num_to_track is None:
+ # default: track all the frames in the video
+ max_frame_num_to_track = num_frames
+ if reverse:
+ end_frame_idx = max(start_frame_idx - max_frame_num_to_track, 0)
+ if start_frame_idx > 0:
+ processing_order = range(start_frame_idx, end_frame_idx - 1, -1)
+ else:
+ processing_order = [] # skip reverse tracking if starting from frame 0
+ else:
+ end_frame_idx = min(start_frame_idx + max_frame_num_to_track, num_frames - 1)
+ processing_order = range(start_frame_idx, end_frame_idx + 1)
+
+ for frame_idx in tqdm(processing_order, desc="propagate in video"):
+ # We skip those frames already in consolidated outputs (these are frames
+ # that received input clicks or mask). Note that we cannot directly run
+ # batched forward on them via `_run_single_frame_inference` because the
+ # number of clicks on each object might be different.
+ if frame_idx in consolidated_frame_inds["cond_frame_outputs"]:
+ storage_key = "cond_frame_outputs"
+ current_out = output_dict[storage_key][frame_idx]
+ pred_masks = current_out["pred_masks"]
+ if clear_non_cond_mem:
+ # clear non-conditioning memory of the surrounding frames
+ self._clear_non_cond_mem_around_input(inference_state, frame_idx)
+ elif frame_idx in consolidated_frame_inds["non_cond_frame_outputs"]:
+ storage_key = "non_cond_frame_outputs"
+ current_out = output_dict[storage_key][frame_idx]
+ pred_masks = current_out["pred_masks"]
+ else:
+ storage_key = "non_cond_frame_outputs"
+ current_out, pred_masks = self._run_single_frame_inference(
+ inference_state=inference_state,
+ output_dict=output_dict,
+ frame_idx=frame_idx,
+ batch_size=batch_size,
+ is_init_cond_frame=False,
+ point_inputs=None,
+ mask_inputs=None,
+ reverse=reverse,
+ run_mem_encoder=True,
+ )
+ output_dict[storage_key][frame_idx] = current_out
+ # Create slices of per-object outputs for subsequent interaction with each
+ # individual object after tracking.
+ self._add_output_per_object(inference_state, frame_idx, current_out, storage_key)
+ inference_state["frames_already_tracked"][frame_idx] = {"reverse": reverse}
+
+ # Resize the output mask to the original video resolution (we directly use
+ # the mask scores on GPU for output to avoid any CPU conversion in between)
+ _, video_res_masks = self._get_orig_video_res_output(inference_state, pred_masks)
+ yield frame_idx, obj_ids, video_res_masks
+
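+    # Usage sketch (illustrative): once prompts are in place, tracking results
+    # are consumed as a generator over frames:
+    #   for frame_idx, obj_ids, masks in predictor.propagate_in_video(state):
+    #       binary = (masks > 0.0).cpu().numpy()  # threshold the [num_objs, 1, H, W] logits
+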
+ def _add_output_per_object(self, inference_state, frame_idx, current_out, storage_key):
+ """
+ Split a multi-object output into per-object output slices and add them into
+ `output_dict_per_obj`. The resulting slices share the same tensor storage.
+ """
+ maskmem_features = current_out["maskmem_features"]
+ assert maskmem_features is None or isinstance(maskmem_features, torch.Tensor)
+
+ maskmem_pos_enc = current_out["maskmem_pos_enc"]
+ assert maskmem_pos_enc is None or isinstance(maskmem_pos_enc, list)
+
+ output_dict_per_obj = inference_state["output_dict_per_obj"]
+ for obj_idx, obj_output_dict in output_dict_per_obj.items():
+ obj_slice = slice(obj_idx, obj_idx + 1)
+ obj_out = {
+ "maskmem_features": None,
+ "maskmem_pos_enc": None,
+ "pred_masks": current_out["pred_masks"][obj_slice],
+ "obj_ptr": current_out["obj_ptr"][obj_slice],
+ "object_score_logits": current_out["object_score_logits"][obj_slice],
+ }
+ if maskmem_features is not None:
+ obj_out["maskmem_features"] = maskmem_features[obj_slice]
+ if maskmem_pos_enc is not None:
+ obj_out["maskmem_pos_enc"] = [x[obj_slice] for x in maskmem_pos_enc]
+ obj_output_dict[storage_key][frame_idx] = obj_out
+
+ @torch.inference_mode()
+ def clear_all_prompts_in_frame(self, inference_state, frame_idx, obj_id, need_output=True):
+ """Remove all input points or mask in a specific frame for a given object."""
+ obj_idx = self._obj_id_to_idx(inference_state, obj_id)
+
+ # Clear the conditioning information on the given frame
+ inference_state["point_inputs_per_obj"][obj_idx].pop(frame_idx, None)
+ inference_state["mask_inputs_per_obj"][obj_idx].pop(frame_idx, None)
+
+ temp_output_dict_per_obj = inference_state["temp_output_dict_per_obj"]
+ temp_output_dict_per_obj[obj_idx]["cond_frame_outputs"].pop(frame_idx, None)
+ temp_output_dict_per_obj[obj_idx]["non_cond_frame_outputs"].pop(frame_idx, None)
+
+ # Check and see if there are still any inputs left on this frame
+ batch_size = self._get_obj_num(inference_state)
+ frame_has_input = False
+ for obj_idx2 in range(batch_size):
+ if frame_idx in inference_state["point_inputs_per_obj"][obj_idx2]:
+ frame_has_input = True
+ break
+ if frame_idx in inference_state["mask_inputs_per_obj"][obj_idx2]:
+ frame_has_input = True
+ break
+
+ # If this frame has no remaining inputs for any objects, we further clear its
+ # conditioning frame status
+ if not frame_has_input:
+ output_dict = inference_state["output_dict"]
+ consolidated_frame_inds = inference_state["consolidated_frame_inds"]
+ consolidated_frame_inds["cond_frame_outputs"].discard(frame_idx)
+ consolidated_frame_inds["non_cond_frame_outputs"].discard(frame_idx)
+ # Remove the frame's conditioning output (possibly downgrading it to non-conditioning)
+ out = output_dict["cond_frame_outputs"].pop(frame_idx, None)
+ if out is not None:
+ # The frame is not a conditioning frame anymore since it's not receiving inputs,
+ # so we "downgrade" its output (if exists) to a non-conditioning frame output.
+ output_dict["non_cond_frame_outputs"][frame_idx] = out
+ inference_state["frames_already_tracked"].pop(frame_idx, None)
+ # Similarly, do it for the sliced output on each object.
+ for obj_idx2 in range(batch_size):
+ obj_output_dict = inference_state["output_dict_per_obj"][obj_idx2]
+ obj_out = obj_output_dict["cond_frame_outputs"].pop(frame_idx, None)
+ if obj_out is not None:
+ obj_output_dict["non_cond_frame_outputs"][frame_idx] = obj_out
+
+ # If all the conditioning frames have been removed, we also clear the tracking outputs
+ if len(output_dict["cond_frame_outputs"]) == 0:
+ self._reset_tracking_results(inference_state)
+
+ if not need_output:
+ return
+ # Finally, output updated masks per object (after removing the inputs above)
+ obj_ids = inference_state["obj_ids"]
+ is_cond = any(
+ frame_idx in obj_temp_output_dict["cond_frame_outputs"]
+ for obj_temp_output_dict in temp_output_dict_per_obj.values()
+ )
+ consolidated_out = self._consolidate_temp_output_across_obj(
+ inference_state,
+ frame_idx,
+ is_cond=is_cond,
+ run_mem_encoder=False,
+ consolidate_at_video_res=True,
+ )
+ _, video_res_masks = self._get_orig_video_res_output(inference_state, consolidated_out["pred_masks_video_res"])
+ return frame_idx, obj_ids, video_res_masks
+
+ @torch.inference_mode()
+ def reset_state(self, inference_state):
+ """Remove all input points or mask in all frames throughout the video."""
+ self._reset_tracking_results(inference_state)
+ # Remove all object ids
+ inference_state["obj_id_to_idx"].clear()
+ inference_state["obj_idx_to_id"].clear()
+ inference_state["obj_ids"].clear()
+ inference_state["point_inputs_per_obj"].clear()
+ inference_state["mask_inputs_per_obj"].clear()
+ inference_state["output_dict_per_obj"].clear()
+ inference_state["temp_output_dict_per_obj"].clear()
+
+ def _reset_tracking_results(self, inference_state):
+ """Reset all tracking inputs and results across the videos."""
+ for v in inference_state["point_inputs_per_obj"].values():
+ v.clear()
+ for v in inference_state["mask_inputs_per_obj"].values():
+ v.clear()
+ for v in inference_state["output_dict_per_obj"].values():
+ v["cond_frame_outputs"].clear()
+ v["non_cond_frame_outputs"].clear()
+ for v in inference_state["temp_output_dict_per_obj"].values():
+ v["cond_frame_outputs"].clear()
+ v["non_cond_frame_outputs"].clear()
+ inference_state["output_dict"]["cond_frame_outputs"].clear()
+ inference_state["output_dict"]["non_cond_frame_outputs"].clear()
+ inference_state["consolidated_frame_inds"]["cond_frame_outputs"].clear()
+ inference_state["consolidated_frame_inds"]["non_cond_frame_outputs"].clear()
+ inference_state["tracking_has_started"] = False
+ inference_state["frames_already_tracked"].clear()
+
+ def _get_image_feature(self, inference_state, frame_idx, batch_size):
+ """Compute the image features on a given frame."""
+ # Look up in the cache first
+ image, backbone_out = inference_state["cached_features"].get(frame_idx, (None, None))
+ if backbone_out is None:
+ # Cache miss -- we will run inference on a single image
+ device = inference_state["device"]
+ image = inference_state["images"][frame_idx].to(device).float().unsqueeze(0)
+ backbone_out = self.forward_image(image)
+ # Cache the most recent frame's feature (for repeated interactions with
+ # a frame; we can use an LRU cache for more frames in the future).
+ inference_state["cached_features"] = {frame_idx: (image, backbone_out)}
+
+        # expand the features so the batch dimension matches the number of objects
+ expanded_image = image.expand(batch_size, -1, -1, -1)
+ expanded_backbone_out = {
+ "backbone_fpn": backbone_out["backbone_fpn"].copy(),
+ "vision_pos_enc": backbone_out["vision_pos_enc"].copy(),
+ }
+ for i, feat in enumerate(expanded_backbone_out["backbone_fpn"]):
+ expanded_backbone_out["backbone_fpn"][i] = feat.expand(batch_size, -1, -1, -1)
+ for i, pos in enumerate(expanded_backbone_out["vision_pos_enc"]):
+ pos = pos.expand(batch_size, -1, -1, -1)
+ expanded_backbone_out["vision_pos_enc"][i] = pos
+
+ features = self._prepare_backbone_features(expanded_backbone_out)
+ features = (expanded_image,) + features
+ return features
+
+ def _run_single_frame_inference(
+ self,
+ inference_state,
+ output_dict,
+ frame_idx,
+ batch_size,
+ is_init_cond_frame,
+ point_inputs,
+ mask_inputs,
+ reverse,
+ run_mem_encoder,
+ prev_sam_mask_logits=None,
+ ):
+ """Run tracking on a single frame based on current inputs and previous memory."""
+ # Retrieve correct image features
+ (
+ _,
+ _,
+ current_vision_feats,
+ current_vision_pos_embeds,
+ feat_sizes,
+ ) = self._get_image_feature(inference_state, frame_idx, batch_size)
+
+ # point and mask should not appear as input simultaneously on the same frame
+ assert point_inputs is None or mask_inputs is None
+ current_out = self.track_step(
+ frame_idx=frame_idx,
+ is_init_cond_frame=is_init_cond_frame,
+ current_vision_feats=current_vision_feats,
+ current_vision_pos_embeds=current_vision_pos_embeds,
+ feat_sizes=feat_sizes,
+ point_inputs=point_inputs,
+ mask_inputs=mask_inputs,
+ output_dict=output_dict,
+ num_frames=inference_state["num_frames"],
+ track_in_reverse=reverse,
+ run_mem_encoder=run_mem_encoder,
+ prev_sam_mask_logits=prev_sam_mask_logits,
+ )
+
+ # optionally offload the output to CPU memory to save GPU space
+ storage_device = inference_state["storage_device"]
+ maskmem_features = current_out["maskmem_features"]
+ if maskmem_features is not None:
+ maskmem_features = maskmem_features.to(torch.bfloat16)
+ maskmem_features = maskmem_features.to(storage_device, non_blocking=True)
+ pred_masks_gpu = current_out["pred_masks"]
+ # potentially fill holes in the predicted masks
+ if self.fill_hole_area > 0:
+ pred_masks_gpu = fill_holes_in_mask_scores(pred_masks_gpu, self.fill_hole_area)
+ pred_masks = pred_masks_gpu.to(storage_device, non_blocking=True)
+ # "maskmem_pos_enc" is the same across frames, so we only need to store one copy of it
+ maskmem_pos_enc = self._get_maskmem_pos_enc(inference_state, current_out)
+ # object pointer is a small tensor, so we always keep it on GPU memory for fast access
+ obj_ptr = current_out["obj_ptr"]
+ object_score_logits = current_out["object_score_logits"]
+ # make a compact version of this frame's output to reduce the state size
+ compact_current_out = {
+ "maskmem_features": maskmem_features,
+ "maskmem_pos_enc": maskmem_pos_enc,
+ "pred_masks": pred_masks,
+ "obj_ptr": obj_ptr,
+ "object_score_logits": object_score_logits,
+ }
+ return compact_current_out, pred_masks_gpu
+
+ def _run_memory_encoder(
+ self,
+ inference_state,
+ frame_idx,
+ batch_size,
+ high_res_masks,
+ object_score_logits,
+ is_mask_from_pts,
+ ):
+ """
+ Run the memory encoder on `high_res_masks`. This is usually after applying
+        non-overlapping constraints to object scores. Since their scores have changed,
+        their memory also needs to be recomputed with the memory encoder.
+ """
+ # Retrieve correct image features
+ _, _, current_vision_feats, _, feat_sizes = self._get_image_feature(inference_state, frame_idx, batch_size)
+ maskmem_features, maskmem_pos_enc = self._encode_new_memory(
+ current_vision_feats=current_vision_feats,
+ feat_sizes=feat_sizes,
+ pred_masks_high_res=high_res_masks,
+ object_score_logits=object_score_logits,
+ is_mask_from_pts=is_mask_from_pts,
+ )
+
+ # optionally offload the output to CPU memory to save GPU space
+ storage_device = inference_state["storage_device"]
+ maskmem_features = maskmem_features.to(torch.bfloat16)
+ maskmem_features = maskmem_features.to(storage_device, non_blocking=True)
+ # "maskmem_pos_enc" is the same across frames, so we only need to store one copy of it
+ maskmem_pos_enc = self._get_maskmem_pos_enc(inference_state, {"maskmem_pos_enc": maskmem_pos_enc})
+ return maskmem_features, maskmem_pos_enc
+
+ def _get_maskmem_pos_enc(self, inference_state, current_out):
+ """
+ `maskmem_pos_enc` is the same across frames and objects, so we cache it as
+ a constant in the inference session to reduce session storage size.
+ """
+ model_constants = inference_state["constants"]
+ # "out_maskmem_pos_enc" should be either a list of tensors or None
+ out_maskmem_pos_enc = current_out["maskmem_pos_enc"]
+ if out_maskmem_pos_enc is not None:
+ if "maskmem_pos_enc" not in model_constants:
+ assert isinstance(out_maskmem_pos_enc, list)
+                # only take the slice for one object, since it's the same across objects
+ maskmem_pos_enc = [x[0:1].clone() for x in out_maskmem_pos_enc]
+ model_constants["maskmem_pos_enc"] = maskmem_pos_enc
+ else:
+ maskmem_pos_enc = model_constants["maskmem_pos_enc"]
+ # expand the cached maskmem_pos_enc to the actual batch size
+ batch_size = out_maskmem_pos_enc[0].size(0)
+ expanded_maskmem_pos_enc = [x.expand(batch_size, -1, -1, -1) for x in maskmem_pos_enc]
+ else:
+ expanded_maskmem_pos_enc = None
+ return expanded_maskmem_pos_enc
+
+ @torch.inference_mode()
+ def remove_object(self, inference_state, obj_id, strict=False, need_output=True):
+ """
+ Remove an object id from the tracking state. If strict is True, we check whether
+ the object id actually exists and raise an error if it doesn't exist.
+ """
+ old_obj_idx_to_rm = inference_state["obj_id_to_idx"].get(obj_id, None)
+ updated_frames = []
+ # Check whether this object_id to remove actually exists and possibly raise an error.
+ if old_obj_idx_to_rm is None:
+ if not strict:
+ return inference_state["obj_ids"], updated_frames
+ raise RuntimeError(
+ f"Cannot remove object id {obj_id} as it doesn't exist. "
+ f"All existing object ids: {inference_state['obj_ids']}."
+ )
+
+ # If this is the only remaining object id, we simply reset the state.
+ if len(inference_state["obj_id_to_idx"]) == 1:
+ self.reset_state(inference_state)
+ return inference_state["obj_ids"], updated_frames
+
+ # There are still remaining objects after removing this object id. In this case,
+ # we need to delete the object storage from inference state tensors.
+ # Step 0: clear the input on those frames where this object id has point or mask input
+ # (note that this step is required as it might downgrade conditioning frames to
+ # non-conditioning ones)
+ obj_input_frames_inds = set()
+ obj_input_frames_inds.update(inference_state["point_inputs_per_obj"][old_obj_idx_to_rm])
+ obj_input_frames_inds.update(inference_state["mask_inputs_per_obj"][old_obj_idx_to_rm])
+ for frame_idx in obj_input_frames_inds:
+ self.clear_all_prompts_in_frame(inference_state, frame_idx, obj_id, need_output=False)
+
+ # Step 1: Update the object id mapping (note that it must be done after Step 0,
+ # since Step 0 still requires the old object id mappings in inference_state)
+ old_obj_ids = inference_state["obj_ids"]
+ old_obj_inds = list(range(len(old_obj_ids)))
+ remain_old_obj_inds = old_obj_inds.copy()
+ remain_old_obj_inds.remove(old_obj_idx_to_rm)
+ new_obj_ids = [old_obj_ids[old_idx] for old_idx in remain_old_obj_inds]
+ new_obj_inds = list(range(len(new_obj_ids)))
+ # build new mappings
+ old_idx_to_new_idx = dict(zip(remain_old_obj_inds, new_obj_inds))
+ inference_state["obj_id_to_idx"] = dict(zip(new_obj_ids, new_obj_inds))
+ inference_state["obj_idx_to_id"] = dict(zip(new_obj_inds, new_obj_ids))
+ inference_state["obj_ids"] = new_obj_ids
+
+ # Step 2: For per-object tensor storage, we shift their obj_idx in the dict keys.
+ # (note that "consolidated_frame_inds" doesn't need to be updated in this step as
+ # it's already handled in Step 0)
+ def _map_keys(container):
+ new_kvs = []
+ for k in old_obj_inds:
+ v = container.pop(k)
+ if k in old_idx_to_new_idx:
+ new_kvs.append((old_idx_to_new_idx[k], v))
+ container.update(new_kvs)
+
+ _map_keys(inference_state["point_inputs_per_obj"])
+ _map_keys(inference_state["mask_inputs_per_obj"])
+ _map_keys(inference_state["output_dict_per_obj"])
+ _map_keys(inference_state["temp_output_dict_per_obj"])
+
+ # Step 3: For packed tensor storage, we index the remaining ids and rebuild the per-object slices.
+ def _slice_state(output_dict, storage_key):
+ for frame_idx, out in output_dict[storage_key].items():
+ out["maskmem_features"] = out["maskmem_features"][remain_old_obj_inds]
+ out["maskmem_pos_enc"] = [x[remain_old_obj_inds] for x in out["maskmem_pos_enc"]]
+ # "maskmem_pos_enc" is the same across frames, so we only need to store one copy of it
+ out["maskmem_pos_enc"] = self._get_maskmem_pos_enc(inference_state, out)
+ out["pred_masks"] = out["pred_masks"][remain_old_obj_inds]
+ out["obj_ptr"] = out["obj_ptr"][remain_old_obj_inds]
+ out["object_score_logits"] = out["object_score_logits"][remain_old_obj_inds]
+ # also update the per-object slices
+ self._add_output_per_object(inference_state, frame_idx, out, storage_key)
+
+ _slice_state(inference_state["output_dict"], "cond_frame_outputs")
+ _slice_state(inference_state["output_dict"], "non_cond_frame_outputs")
+
+ # Step 4: Further collect the outputs on those frames in `obj_input_frames_inds`, which
+ # could show an updated mask for objects previously occluded by the object being removed
+ if need_output:
+ temp_output_dict_per_obj = inference_state["temp_output_dict_per_obj"]
+ for frame_idx in obj_input_frames_inds:
+ is_cond = any(
+ frame_idx in obj_temp_output_dict["cond_frame_outputs"]
+ for obj_temp_output_dict in temp_output_dict_per_obj.values()
+ )
+ consolidated_out = self._consolidate_temp_output_across_obj(
+ inference_state,
+ frame_idx,
+ is_cond=is_cond,
+ run_mem_encoder=False,
+ consolidate_at_video_res=True,
+ )
+ _, video_res_masks = self._get_orig_video_res_output(
+ inference_state, consolidated_out["pred_masks_video_res"]
+ )
+ updated_frames.append((frame_idx, video_res_masks))
+
+ return inference_state["obj_ids"], updated_frames
+
+ def _clear_non_cond_mem_around_input(self, inference_state, frame_idx):
+ """
+ Remove the non-conditioning memory around the input frame. When users provide
+ correction clicks, the surrounding frames' non-conditioning memories can still
+ contain outdated object appearance information and could confuse the model.
+
+ This method clears those non-conditioning memories surrounding the interacted
+ frame to avoid giving the model both old and new information about the object.
+ """
+ r = self.memory_temporal_stride_for_eval
+ frame_idx_begin = frame_idx - r * self.num_maskmem
+ frame_idx_end = frame_idx + r * self.num_maskmem
+ output_dict = inference_state["output_dict"]
+ non_cond_frame_outputs = output_dict["non_cond_frame_outputs"]
+ for t in range(frame_idx_begin, frame_idx_end + 1):
+ non_cond_frame_outputs.pop(t, None)
+ for obj_output_dict in inference_state["output_dict_per_obj"].values():
+ obj_output_dict["non_cond_frame_outputs"].pop(t, None)
diff --git a/projects/PCSegSAM2/sam2/utils/__init__.py b/projects/PCSegSAM2/sam2/utils/__init__.py
new file mode 100644
index 00000000..5277f461
--- /dev/null
+++ b/projects/PCSegSAM2/sam2/utils/__init__.py
@@ -0,0 +1,5 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
diff --git a/projects/PCSegSAM2/sam2/utils/amg.py b/projects/PCSegSAM2/sam2/utils/amg.py
new file mode 100644
index 00000000..744d798d
--- /dev/null
+++ b/projects/PCSegSAM2/sam2/utils/amg.py
@@ -0,0 +1,332 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+import math
+from copy import deepcopy
+from itertools import product
+from typing import Any, Dict, Generator, ItemsView, List, Tuple
+
+import numpy as np
+import torch
+
+# Very lightly adapted from https://github.com/facebookresearch/segment-anything/blob/main/segment_anything/utils/amg.py
+
+
+class MaskData:
+ """
+ A structure for storing masks and their related data in batched format.
+ Implements basic filtering and concatenation.
+ """
+
+ def __init__(self, **kwargs) -> None:
+ for v in kwargs.values():
+ assert isinstance(
+ v, (list, np.ndarray, torch.Tensor)
+ ), "MaskData only supports list, numpy arrays, and torch tensors."
+ self._stats = dict(**kwargs)
+
+ def __setitem__(self, key: str, item: Any) -> None:
+ assert isinstance(
+ item, (list, np.ndarray, torch.Tensor)
+ ), "MaskData only supports list, numpy arrays, and torch tensors."
+ self._stats[key] = item
+
+ def __delitem__(self, key: str) -> None:
+ del self._stats[key]
+
+ def __getitem__(self, key: str) -> Any:
+ return self._stats[key]
+
+ def items(self) -> ItemsView[str, Any]:
+ return self._stats.items()
+
+ def filter(self, keep: torch.Tensor) -> None:
+ for k, v in self._stats.items():
+ if v is None:
+ self._stats[k] = None
+ elif isinstance(v, torch.Tensor):
+ self._stats[k] = v[torch.as_tensor(keep, device=v.device)]
+ elif isinstance(v, np.ndarray):
+ self._stats[k] = v[keep.detach().cpu().numpy()]
+ elif isinstance(v, list) and keep.dtype == torch.bool:
+ self._stats[k] = [a for i, a in enumerate(v) if keep[i]]
+ elif isinstance(v, list):
+ self._stats[k] = [v[i] for i in keep]
+ else:
+ raise TypeError(f"MaskData key {k} has an unsupported type {type(v)}.")
+
+ def cat(self, new_stats: "MaskData") -> None:
+ for k, v in new_stats.items():
+ if k not in self._stats or self._stats[k] is None:
+ self._stats[k] = deepcopy(v)
+ elif isinstance(v, torch.Tensor):
+ self._stats[k] = torch.cat([self._stats[k], v], dim=0)
+ elif isinstance(v, np.ndarray):
+ self._stats[k] = np.concatenate([self._stats[k], v], axis=0)
+ elif isinstance(v, list):
+ self._stats[k] = self._stats[k] + deepcopy(v)
+ else:
+ raise TypeError(f"MaskData key {k} has an unsupported type {type(v)}.")
+
+ def to_numpy(self) -> None:
+ for k, v in self._stats.items():
+ if isinstance(v, torch.Tensor):
+ self._stats[k] = v.float().detach().cpu().numpy()
+
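+
+# NOTE: a minimal usage sketch (illustrative, not part of the upstream file)
+# showing how MaskData keeps heterogeneous per-mask fields aligned under filter().
+def _example_mask_data_usage():
+    data = MaskData(
+        iou_preds=torch.tensor([0.9, 0.4, 0.8]),
+        boxes=torch.zeros(3, 4),
+        rles=[{"size": [2, 2], "counts": [4]}] * 3,
+    )
+    keep = data["iou_preds"] > 0.5  # boolean mask over the batch dimension
+    data.filter(keep)  # tensors, arrays, and lists are filtered consistently
+    assert len(data["rles"]) == 2
+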
+
+def is_box_near_crop_edge(
+ boxes: torch.Tensor, crop_box: List[int], orig_box: List[int], atol: float = 20.0
+) -> torch.Tensor:
+ """Filter masks at the edge of a crop, but not at the edge of the original image."""
+ crop_box_torch = torch.as_tensor(crop_box, dtype=torch.float, device=boxes.device)
+ orig_box_torch = torch.as_tensor(orig_box, dtype=torch.float, device=boxes.device)
+ boxes = uncrop_boxes_xyxy(boxes, crop_box).float()
+ near_crop_edge = torch.isclose(boxes, crop_box_torch[None, :], atol=atol, rtol=0)
+ near_image_edge = torch.isclose(boxes, orig_box_torch[None, :], atol=atol, rtol=0)
+ near_crop_edge = torch.logical_and(near_crop_edge, ~near_image_edge)
+ return torch.any(near_crop_edge, dim=1)
+
+
+def box_xyxy_to_xywh(box_xyxy: torch.Tensor) -> torch.Tensor:
+ box_xywh = deepcopy(box_xyxy)
+ box_xywh[2] = box_xywh[2] - box_xywh[0]
+ box_xywh[3] = box_xywh[3] - box_xywh[1]
+ return box_xywh
+
+
+def batch_iterator(batch_size: int, *args) -> Generator[List[Any], None, None]:
+ assert len(args) > 0 and all(
+ len(a) == len(args[0]) for a in args
+ ), "Batched iteration must have inputs of all the same size."
+ n_batches = len(args[0]) // batch_size + int(len(args[0]) % batch_size != 0)
+ for b in range(n_batches):
+ yield [arg[b * batch_size : (b + 1) * batch_size] for arg in args]
+
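+
+# NOTE: illustrative sketch (not upstream code) of how batch_iterator yields
+# aligned slices across several same-length inputs.
+def _example_batch_iterator():
+    points = np.arange(10).reshape(5, 2)
+    labels = np.ones(5)
+    for batch_points, batch_labels in batch_iterator(2, points, labels):
+        assert len(batch_points) == len(batch_labels)  # batches of 2, 2, then 1
+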
+
+def mask_to_rle_pytorch(tensor: torch.Tensor) -> List[Dict[str, Any]]:
+ """
+ Encodes masks to an uncompressed RLE, in the format expected by
+    pycocotools.
+ """
+ # Put in fortran order and flatten h,w
+ b, h, w = tensor.shape
+ tensor = tensor.permute(0, 2, 1).flatten(1)
+
+ # Compute change indices
+ diff = tensor[:, 1:] ^ tensor[:, :-1]
+ change_indices = diff.nonzero()
+
+ # Encode run length
+ out = []
+ for i in range(b):
+ cur_idxs = change_indices[change_indices[:, 0] == i, 1]
+ cur_idxs = torch.cat(
+ [
+ torch.tensor([0], dtype=cur_idxs.dtype, device=cur_idxs.device),
+ cur_idxs + 1,
+ torch.tensor([h * w], dtype=cur_idxs.dtype, device=cur_idxs.device),
+ ]
+ )
+ btw_idxs = cur_idxs[1:] - cur_idxs[:-1]
+ counts = [] if tensor[i, 0] == 0 else [0]
+ counts.extend(btw_idxs.detach().cpu().tolist())
+ out.append({"size": [h, w], "counts": counts})
+ return out
+
+
+def rle_to_mask(rle: Dict[str, Any]) -> np.ndarray:
+ """Compute a binary mask from an uncompressed RLE."""
+ h, w = rle["size"]
+ mask = np.empty(h * w, dtype=bool)
+ idx = 0
+ parity = False
+ for count in rle["counts"]:
+ mask[idx : idx + count] = parity
+ idx += count
+ parity ^= True
+ mask = mask.reshape(w, h)
+ return mask.transpose() # Put in C order
+
+
+def area_from_rle(rle: Dict[str, Any]) -> int:
+ return sum(rle["counts"][1::2])
+
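+
+# NOTE: a hedged round-trip sketch (not upstream code) tying together
+# mask_to_rle_pytorch, rle_to_mask, and area_from_rle.
+def _example_rle_roundtrip():
+    masks = torch.zeros(1, 4, 4, dtype=torch.bool)
+    masks[0, 1:3, 1:3] = True  # a 2x2 foreground square
+    rle = mask_to_rle_pytorch(masks)[0]
+    recovered = rle_to_mask(rle)  # (4, 4) bool array in C order
+    assert recovered.sum() == area_from_rle(rle) == 4
+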
+
+def calculate_stability_score(masks: torch.Tensor, mask_threshold: float, threshold_offset: float) -> torch.Tensor:
+ """
+ Computes the stability score for a batch of masks. The stability
+ score is the IoU between the binary masks obtained by thresholding
+ the predicted mask logits at high and low values.
+ """
+ # One mask is always contained inside the other.
+ # Save memory by preventing unnecessary cast to torch.int64
+ intersections = (masks > (mask_threshold + threshold_offset)).sum(-1, dtype=torch.int16).sum(-1, dtype=torch.int32)
+ unions = (masks > (mask_threshold - threshold_offset)).sum(-1, dtype=torch.int16).sum(-1, dtype=torch.int32)
+ return intersections / unions
+
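+
+# NOTE: illustrative sketch (not upstream code). With mask_threshold=0.0 and
+# threshold_offset=1.0, the score is the IoU of thresholding the logits at +1.0
+# versus -1.0; values near 1.0 mean the mask is stable under threshold perturbation.
+def _example_stability_score():
+    logits = torch.full((1, 4, 4), -5.0)
+    logits[0, 1:3, 1:3] = 5.0  # a confidently-foreground square
+    score = calculate_stability_score(logits, mask_threshold=0.0, threshold_offset=1.0)
+    assert torch.allclose(score, torch.ones(1))
+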
+
+def build_point_grid(n_per_side: int) -> np.ndarray:
+ """Generates a 2D grid of points evenly spaced in [0,1]x[0,1]."""
+ offset = 1 / (2 * n_per_side)
+ points_one_side = np.linspace(offset, 1 - offset, n_per_side)
+ points_x = np.tile(points_one_side[None, :], (n_per_side, 1))
+ points_y = np.tile(points_one_side[:, None], (1, n_per_side))
+ points = np.stack([points_x, points_y], axis=-1).reshape(-1, 2)
+ return points
+
+
+def build_all_layer_point_grids(n_per_side: int, n_layers: int, scale_per_layer: int) -> List[np.ndarray]:
+ """Generates point grids for all crop layers."""
+ points_by_layer = []
+ for i in range(n_layers + 1):
+ n_points = int(n_per_side / (scale_per_layer**i))
+ points_by_layer.append(build_point_grid(n_points))
+ return points_by_layer
+
+
+def generate_crop_boxes(
+ im_size: Tuple[int, ...], n_layers: int, overlap_ratio: float
+) -> Tuple[List[List[int]], List[int]]:
+ """
+    Generates a list of crop boxes of different sizes; the ith layer
+    has (2**i)**2 boxes.
+ """
+ crop_boxes, layer_idxs = [], []
+ im_h, im_w = im_size
+ short_side = min(im_h, im_w)
+
+ # Original image
+ crop_boxes.append([0, 0, im_w, im_h])
+ layer_idxs.append(0)
+
+ def crop_len(orig_len, n_crops, overlap):
+ return int(math.ceil((overlap * (n_crops - 1) + orig_len) / n_crops))
+
+ for i_layer in range(n_layers):
+ n_crops_per_side = 2 ** (i_layer + 1)
+ overlap = int(overlap_ratio * short_side * (2 / n_crops_per_side))
+
+ crop_w = crop_len(im_w, n_crops_per_side, overlap)
+ crop_h = crop_len(im_h, n_crops_per_side, overlap)
+
+ crop_box_x0 = [int((crop_w - overlap) * i) for i in range(n_crops_per_side)]
+ crop_box_y0 = [int((crop_h - overlap) * i) for i in range(n_crops_per_side)]
+
+        # Crops in XYXY format (the uncrop helpers below unpack x0, y0, x1, y1)
+ for x0, y0 in product(crop_box_x0, crop_box_y0):
+ box = [x0, y0, min(x0 + crop_w, im_w), min(y0 + crop_h, im_h)]
+ crop_boxes.append(box)
+ layer_idxs.append(i_layer + 1)
+
+ return crop_boxes, layer_idxs
+
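+
+# NOTE: a quick sanity sketch (not upstream code): layer 0 is the full image and
+# layer i contributes (2**i)**2 overlapping crops, so n_layers=1 yields 1 + 4 = 5
+# XYXY boxes.
+def _example_generate_crop_boxes():
+    crop_boxes, layer_idxs = generate_crop_boxes((100, 200), n_layers=1, overlap_ratio=0.25)
+    assert len(crop_boxes) == 5 and crop_boxes[0] == [0, 0, 200, 100]
+    assert layer_idxs == [0, 1, 1, 1, 1]
+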
+
+def uncrop_boxes_xyxy(boxes: torch.Tensor, crop_box: List[int]) -> torch.Tensor:
+ x0, y0, _, _ = crop_box
+ offset = torch.tensor([[x0, y0, x0, y0]], device=boxes.device)
+ # Check if boxes has a channel dimension
+ if len(boxes.shape) == 3:
+ offset = offset.unsqueeze(1)
+ return boxes + offset
+
+
+def uncrop_points(points: torch.Tensor, crop_box: List[int]) -> torch.Tensor:
+ x0, y0, _, _ = crop_box
+ offset = torch.tensor([[x0, y0]], device=points.device)
+ # Check if points has a channel dimension
+ if len(points.shape) == 3:
+ offset = offset.unsqueeze(1)
+ return points + offset
+
+
+def uncrop_masks(masks: torch.Tensor, crop_box: List[int], orig_h: int, orig_w: int) -> torch.Tensor:
+ x0, y0, x1, y1 = crop_box
+ if x0 == 0 and y0 == 0 and x1 == orig_w and y1 == orig_h:
+ return masks
+ # Coordinate transform masks
+ pad_x, pad_y = orig_w - (x1 - x0), orig_h - (y1 - y0)
+ pad = (x0, pad_x - x0, y0, pad_y - y0)
+ return torch.nn.functional.pad(masks, pad, value=0)
+
+
+def remove_small_regions(mask: np.ndarray, area_thresh: float, mode: str) -> Tuple[np.ndarray, bool]:
+ """
+    Removes small disconnected regions and holes in a mask. Returns the
+    mask and a flag indicating whether it was modified.
+ """
+ import cv2 # type: ignore
+
+ assert mode in ["holes", "islands"]
+ correct_holes = mode == "holes"
+ working_mask = (correct_holes ^ mask).astype(np.uint8)
+ n_labels, regions, stats, _ = cv2.connectedComponentsWithStats(working_mask, 8)
+ sizes = stats[:, -1][1:] # Row 0 is background label
+ small_regions = [i + 1 for i, s in enumerate(sizes) if s < area_thresh]
+ if len(small_regions) == 0:
+ return mask, False
+ fill_labels = [0] + small_regions
+ if not correct_holes:
+ fill_labels = [i for i in range(n_labels) if i not in fill_labels]
+ # If every region is below threshold, keep largest
+ if len(fill_labels) == 0:
+ fill_labels = [int(np.argmax(sizes)) + 1]
+ mask = np.isin(regions, fill_labels)
+ return mask, True
+
+
+def coco_encode_rle(uncompressed_rle: Dict[str, Any]) -> Dict[str, Any]:
+ from pycocotools import mask as mask_utils # type: ignore
+
+ h, w = uncompressed_rle["size"]
+ rle = mask_utils.frPyObjects(uncompressed_rle, h, w)
+ rle["counts"] = rle["counts"].decode("utf-8") # Necessary to serialize with json
+ return rle
+
+
+def batched_mask_to_box(masks: torch.Tensor) -> torch.Tensor:
+ """
+    Calculates boxes in XYXY format around masks. Returns [0,0,0,0] for
+ an empty mask. For input shape C1xC2x...xHxW, the output shape is C1xC2x...x4.
+ """
+ # torch.max below raises an error on empty inputs, just skip in this case
+ if torch.numel(masks) == 0:
+ return torch.zeros(*masks.shape[:-2], 4, device=masks.device)
+
+ # Normalize shape to CxHxW
+ shape = masks.shape
+ h, w = shape[-2:]
+ if len(shape) > 2:
+ masks = masks.flatten(0, -3)
+ else:
+ masks = masks.unsqueeze(0)
+
+ # Get top and bottom edges
+ in_height, _ = torch.max(masks, dim=-1)
+ in_height_coords = in_height * torch.arange(h, device=in_height.device)[None, :]
+ bottom_edges, _ = torch.max(in_height_coords, dim=-1)
+ in_height_coords = in_height_coords + h * (~in_height)
+ top_edges, _ = torch.min(in_height_coords, dim=-1)
+
+ # Get left and right edges
+ in_width, _ = torch.max(masks, dim=-2)
+ in_width_coords = in_width * torch.arange(w, device=in_width.device)[None, :]
+ right_edges, _ = torch.max(in_width_coords, dim=-1)
+ in_width_coords = in_width_coords + w * (~in_width)
+ left_edges, _ = torch.min(in_width_coords, dim=-1)
+
+ # If the mask is empty the right edge will be to the left of the left edge.
+ # Replace these boxes with [0, 0, 0, 0]
+ empty_filter = (right_edges < left_edges) | (bottom_edges < top_edges)
+ out = torch.stack([left_edges, top_edges, right_edges, bottom_edges], dim=-1)
+ out = out * (~empty_filter).unsqueeze(-1)
+
+ # Return to original shape
+ if len(shape) > 2:
+ out = out.reshape(*shape[:-2], 4)
+ else:
+ out = out[0]
+
+ return out
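+
+
+# NOTE: an illustrative, hedged usage sketch appended for readability (not part
+# of the upstream file). It checks that batched_mask_to_box keeps leading batch
+# dimensions and returns inclusive XYXY boxes.
+def _example_batched_mask_to_box():
+    masks = torch.zeros(2, 1, 8, 8, dtype=torch.bool)
+    masks[0, 0, 2:5, 3:7] = True  # rows 2..4, cols 3..6
+    boxes = batched_mask_to_box(masks)  # shape (2, 1, 4)
+    assert boxes[0, 0].tolist() == [3, 2, 6, 4]
+    assert boxes[1, 0].tolist() == [0, 0, 0, 0]  # empty mask -> zero box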
diff --git a/projects/PCSegSAM2/sam2/utils/misc.py b/projects/PCSegSAM2/sam2/utils/misc.py
new file mode 100644
index 00000000..2432548c
--- /dev/null
+++ b/projects/PCSegSAM2/sam2/utils/misc.py
@@ -0,0 +1,341 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+import os
+import warnings
+from threading import Thread
+
+import numpy as np
+import torch
+from PIL import Image
+from tqdm import tqdm
+
+
+def get_sdpa_settings():
+ if torch.cuda.is_available():
+ old_gpu = torch.cuda.get_device_properties(0).major < 7
+ # only use Flash Attention on Ampere (8.0) or newer GPUs
+ use_flash_attn = torch.cuda.get_device_properties(0).major >= 8
+ if not use_flash_attn:
+ warnings.warn(
+ "Flash Attention is disabled as it requires a GPU with Ampere (8.0) CUDA capability.",
+ category=UserWarning,
+ stacklevel=2,
+ )
+ # keep math kernel for PyTorch versions before 2.2 (Flash Attention v2 is only
+ # available on PyTorch 2.2+, while Flash Attention v1 cannot handle all cases)
+ pytorch_version = tuple(int(v) for v in torch.__version__.split(".")[:2])
+ if pytorch_version < (2, 2):
+ warnings.warn(
+ f"You are using PyTorch {torch.__version__} without Flash Attention v2 support. "
+ "Consider upgrading to PyTorch 2.2+ for Flash Attention v2 (which could be faster).",
+ category=UserWarning,
+ stacklevel=2,
+ )
+ math_kernel_on = pytorch_version < (2, 2) or not use_flash_attn
+ else:
+ old_gpu = True
+ use_flash_attn = False
+ math_kernel_on = True
+
+ return old_gpu, use_flash_attn, math_kernel_on
+
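+
+# NOTE: hedged usage sketch (not upstream code). The three flags are typically
+# fed to torch.backends.cuda.sdp_kernel to select attention kernels, e.g.:
+#
+#   OLD_GPU, USE_FLASH_ATTN, MATH_KERNEL_ON = get_sdpa_settings()
+#   with torch.backends.cuda.sdp_kernel(
+#       enable_flash=USE_FLASH_ATTN,
+#       enable_math=MATH_KERNEL_ON,
+#       enable_mem_efficient=OLD_GPU,
+#   ):
+#       out = torch.nn.functional.scaled_dot_product_attention(q, k, v)
+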
+
+def get_connected_components(mask):
+ """
+ Get the connected components (8-connectivity) of binary masks of shape (N, 1, H, W).
+
+ Inputs:
+ - mask: A binary mask tensor of shape (N, 1, H, W), where 1 is foreground and 0 is
+ background.
+
+ Outputs:
+ - labels: A tensor of shape (N, 1, H, W) containing the connected component labels
+ for foreground pixels and 0 for background pixels.
+ - counts: A tensor of shape (N, 1, H, W) containing the area of the connected
+ components for foreground pixels and 0 for background pixels.
+ """
+ from sam2 import _C
+
+    # NOTE: "componnets" (sic) matches the symbol name registered by the compiled
+    # CUDA extension, so the misspelling must be kept.
+    return _C.get_connected_componnets(mask.to(torch.uint8).contiguous())
+
+
+def mask_to_box(masks: torch.Tensor):
+ """
+ compute bounding box given an input mask
+
+ Inputs:
+ - masks: [B, 1, H, W] masks, dtype=torch.Tensor
+
+ Returns:
+ - box_coords: [B, 1, 4], contains (x, y) coordinates of top left and bottom right box corners, dtype=torch.Tensor
+ """
+ B, _, h, w = masks.shape
+ device = masks.device
+ xs = torch.arange(w, device=device, dtype=torch.int32)
+ ys = torch.arange(h, device=device, dtype=torch.int32)
+ grid_xs, grid_ys = torch.meshgrid(xs, ys, indexing="xy")
+ grid_xs = grid_xs[None, None, ...].expand(B, 1, h, w)
+ grid_ys = grid_ys[None, None, ...].expand(B, 1, h, w)
+ min_xs, _ = torch.min(torch.where(masks, grid_xs, w).flatten(-2), dim=-1)
+ max_xs, _ = torch.max(torch.where(masks, grid_xs, -1).flatten(-2), dim=-1)
+ min_ys, _ = torch.min(torch.where(masks, grid_ys, h).flatten(-2), dim=-1)
+ max_ys, _ = torch.max(torch.where(masks, grid_ys, -1).flatten(-2), dim=-1)
+ bbox_coords = torch.stack((min_xs, min_ys, max_xs, max_ys), dim=-1)
+
+ return bbox_coords
+
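+
+# NOTE: small sanity sketch (not upstream code). Boxes are inclusive
+# (x_min, y_min, x_max, y_max) pixel coordinates per mask.
+def _example_mask_to_box():
+    masks = torch.zeros(1, 1, 6, 6, dtype=torch.bool)
+    masks[0, 0, 1:4, 2:5] = True  # rows 1..3, cols 2..4
+    assert mask_to_box(masks)[0, 0].tolist() == [2, 1, 4, 3]
+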
+
+def _load_img_as_tensor(img_path, image_size):
+ img_pil = Image.open(img_path)
+ img_np = np.array(img_pil.convert("RGB").resize((image_size, image_size)))
+ if img_np.dtype == np.uint8: # np.uint8 is expected for JPEG images
+ img_np = img_np / 255.0
+ else:
+ raise RuntimeError(f"Unknown image dtype: {img_np.dtype} on {img_path}")
+ img = torch.from_numpy(img_np).permute(2, 0, 1)
+ video_width, video_height = img_pil.size # the original video size
+ return img, video_height, video_width
+
+
+class AsyncVideoFrameLoader:
+ """
+    A list of video frames to be loaded asynchronously without blocking session start.
+ """
+
+ def __init__(
+ self,
+ img_paths,
+ image_size,
+ offload_video_to_cpu,
+ img_mean,
+ img_std,
+ compute_device,
+ ):
+ self.img_paths = img_paths
+ self.image_size = image_size
+ self.offload_video_to_cpu = offload_video_to_cpu
+ self.img_mean = img_mean
+ self.img_std = img_std
+ # items in `self.images` will be loaded asynchronously
+ self.images = [None] * len(img_paths)
+ # catch and raise any exceptions in the async loading thread
+ self.exception = None
+        # video_height and video_width will be filled when loading the first image
+ self.video_height = None
+ self.video_width = None
+ self.compute_device = compute_device
+
+ # load the first frame to fill video_height and video_width and also
+ # to cache it (since it's most likely where the user will click)
+ self.__getitem__(0)
+
+ # load the rest of frames asynchronously without blocking the session start
+ def _load_frames():
+ try:
+ for n in tqdm(range(len(self.images)), desc="frame loading (JPEG)"):
+ self.__getitem__(n)
+ except Exception as e:
+ self.exception = e
+
+ self.thread = Thread(target=_load_frames, daemon=True)
+ self.thread.start()
+
+ def __getitem__(self, index):
+ if self.exception is not None:
+ raise RuntimeError("Failure in frame loading thread") from self.exception
+
+ img = self.images[index]
+ if img is not None:
+ return img
+
+ img, video_height, video_width = _load_img_as_tensor(self.img_paths[index], self.image_size)
+ self.video_height = video_height
+ self.video_width = video_width
+ # normalize by mean and std
+ img -= self.img_mean
+ img /= self.img_std
+ if not self.offload_video_to_cpu:
+ img = img.to(self.compute_device, non_blocking=True)
+ self.images[index] = img
+ return img
+
+ def __len__(self):
+ return len(self.images)
+
+
+def load_video_frames(
+ video_path,
+ image_size,
+ offload_video_to_cpu,
+ img_mean=(0.485, 0.456, 0.406),
+ img_std=(0.229, 0.224, 0.225),
+ async_loading_frames=False,
+ compute_device=torch.device("cuda"),
+):
+ """
+ Load the video frames from video_path. The frames are resized to image_size as in
+ the model and are loaded to GPU if offload_video_to_cpu=False. This is used by the demo.
+ """
+ is_bytes = isinstance(video_path, bytes)
+ is_str = isinstance(video_path, str)
+ is_mp4_path = is_str and os.path.splitext(video_path)[-1] in [".mp4", ".MP4"]
+ if is_bytes or is_mp4_path:
+ return load_video_frames_from_video_file(
+ video_path=video_path,
+ image_size=image_size,
+ offload_video_to_cpu=offload_video_to_cpu,
+ img_mean=img_mean,
+ img_std=img_std,
+ compute_device=compute_device,
+ )
+ elif is_str and os.path.isdir(video_path):
+ return load_video_frames_from_jpg_images(
+ video_path=video_path,
+ image_size=image_size,
+ offload_video_to_cpu=offload_video_to_cpu,
+ img_mean=img_mean,
+ img_std=img_std,
+ async_loading_frames=async_loading_frames,
+ compute_device=compute_device,
+ )
+ else:
+ raise NotImplementedError("Only MP4 video and JPEG folder are supported at this moment")
+
+
+def load_video_frames_from_jpg_images(
+ video_path,
+ image_size,
+ offload_video_to_cpu,
+ img_mean=(0.485, 0.456, 0.406),
+ img_std=(0.229, 0.224, 0.225),
+ async_loading_frames=False,
+ compute_device=torch.device("cuda"),
+):
+ """
+ Load the video frames from a directory of JPEG files (".jpg" format).
+
+ The frames are resized to image_size x image_size and are loaded to GPU if
+ `offload_video_to_cpu` is `False` and to CPU if `offload_video_to_cpu` is `True`.
+
+    You can load the frames asynchronously by setting `async_loading_frames` to `True`.
+ """
+ if isinstance(video_path, str) and os.path.isdir(video_path):
+ jpg_folder = video_path
+ else:
+ raise NotImplementedError(
+ "Only JPEG frames are supported at this moment. For video files, you may use "
+ "ffmpeg (https://ffmpeg.org/) to extract frames into a folder of JPEG files, such as \n"
+ "```\n"
+ "ffmpeg -i .mp4 -q:v 2 -start_number 0 /'%05d.jpg'\n"
+ "```\n"
+ "where `-q:v` generates high-quality JPEG frames and `-start_number 0` asks "
+ "ffmpeg to start the JPEG file from 00000.jpg."
+ )
+
+ frame_names = [p for p in os.listdir(jpg_folder) if os.path.splitext(p)[-1] in [".jpg", ".jpeg", ".JPG", ".JPEG"]]
+ frame_names.sort(key=lambda p: int(os.path.splitext(p)[0]))
+ num_frames = len(frame_names)
+ if num_frames == 0:
+ raise RuntimeError(f"no images found in {jpg_folder}")
+ img_paths = [os.path.join(jpg_folder, frame_name) for frame_name in frame_names]
+ img_mean = torch.tensor(img_mean, dtype=torch.float32)[:, None, None]
+ img_std = torch.tensor(img_std, dtype=torch.float32)[:, None, None]
+
+ if async_loading_frames:
+ lazy_images = AsyncVideoFrameLoader(
+ img_paths,
+ image_size,
+ offload_video_to_cpu,
+ img_mean,
+ img_std,
+ compute_device,
+ )
+ return lazy_images, lazy_images.video_height, lazy_images.video_width
+
+ images = torch.zeros(num_frames, 3, image_size, image_size, dtype=torch.float32)
+ for n, img_path in enumerate(tqdm(img_paths, desc="frame loading (JPEG)")):
+ images[n], video_height, video_width = _load_img_as_tensor(img_path, image_size)
+ if not offload_video_to_cpu:
+ images = images.to(compute_device)
+ img_mean = img_mean.to(compute_device)
+ img_std = img_std.to(compute_device)
+ # normalize by mean and std
+ images -= img_mean
+ images /= img_std
+ return images, video_height, video_width
+
+
+def load_video_frames_from_video_file(
+ video_path,
+ image_size,
+ offload_video_to_cpu,
+ img_mean=(0.485, 0.456, 0.406),
+ img_std=(0.229, 0.224, 0.225),
+ compute_device=torch.device("cuda"),
+):
+ """Load the video frames from a video file."""
+ import decord
+
+ img_mean = torch.tensor(img_mean, dtype=torch.float32)[:, None, None]
+ img_std = torch.tensor(img_std, dtype=torch.float32)[:, None, None]
+ # Get the original video height and width
+ decord.bridge.set_bridge("torch")
+ video_height, video_width, _ = decord.VideoReader(video_path).next().shape
+ # Iterate over all frames in the video
+ images = []
+ for frame in decord.VideoReader(video_path, width=image_size, height=image_size):
+ images.append(frame.permute(2, 0, 1))
+
+ images = torch.stack(images, dim=0).float() / 255.0
+ if not offload_video_to_cpu:
+ images = images.to(compute_device)
+ img_mean = img_mean.to(compute_device)
+ img_std = img_std.to(compute_device)
+ # normalize by mean and std
+ images -= img_mean
+ images /= img_std
+ return images, video_height, video_width
+
+
+def fill_holes_in_mask_scores(mask, max_area):
+ """
+    A post-processor that fills small holes (area <= `max_area`) in mask scores.
+ """
+ # Holes are those connected components in background with area <= self.max_area
+ # (background regions are those with mask scores <= 0)
+ assert max_area > 0, "max_area must be positive"
+
+ input_mask = mask
+ try:
+ labels, areas = get_connected_components(mask <= 0)
+ is_hole = (labels > 0) & (areas <= max_area)
+ # We fill holes with a small positive mask score (0.1) to change them to foreground.
+ mask = torch.where(is_hole, 0.1, mask)
+ except Exception as e:
+ # Skip the post-processing step on removing small holes if the CUDA kernel fails
+ warnings.warn(
+ f"{e}\n\nSkipping the post-processing step due to the error above. You can "
+ "still use SAM 2 and it's OK to ignore the error above, although some post-processing "
+ "functionality may be limited (which doesn't affect the results in most cases; see "
+ "https://github.com/facebookresearch/sam2/blob/main/INSTALL.md).",
+ category=UserWarning,
+ stacklevel=2,
+ )
+ mask = input_mask
+
+ return mask
+
+
+def concat_points(old_point_inputs, new_points, new_labels):
+ """Add new points and labels to previous point inputs (add at the end)."""
+ if old_point_inputs is None:
+ points, labels = new_points, new_labels
+ else:
+ points = torch.cat([old_point_inputs["point_coords"], new_points], dim=1)
+ labels = torch.cat([old_point_inputs["point_labels"], new_labels], dim=1)
+
+ return {"point_coords": points, "point_labels": labels}
diff --git a/projects/PCSegSAM2/sam2/utils/transforms.py b/projects/PCSegSAM2/sam2/utils/transforms.py
new file mode 100644
index 00000000..8e8292d5
--- /dev/null
+++ b/projects/PCSegSAM2/sam2/utils/transforms.py
@@ -0,0 +1,108 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+import warnings
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from torchvision.transforms import Normalize, Resize, ToTensor
+
+
+class SAM2Transforms(nn.Module):
+ def __init__(self, resolution, mask_threshold, max_hole_area=0.0, max_sprinkle_area=0.0):
+ """
+ Transforms for SAM2.
+ """
+ super().__init__()
+ self.resolution = resolution
+ self.mask_threshold = mask_threshold
+ self.max_hole_area = max_hole_area
+ self.max_sprinkle_area = max_sprinkle_area
+ self.mean = [0.485, 0.456, 0.406]
+ self.std = [0.229, 0.224, 0.225]
+ self.to_tensor = ToTensor()
+ self.transforms = torch.jit.script(
+ nn.Sequential(
+ Resize((self.resolution, self.resolution)),
+ Normalize(self.mean, self.std),
+ )
+ )
+
+ def __call__(self, x):
+ x = self.to_tensor(x)
+ return self.transforms(x)
+
+ def forward_batch(self, img_list):
+ img_batch = [self.transforms(self.to_tensor(img)) for img in img_list]
+ img_batch = torch.stack(img_batch, dim=0)
+ return img_batch
+
+ def transform_coords(self, coords: torch.Tensor, normalize=False, orig_hw=None) -> torch.Tensor:
+ """
+        Expects a torch tensor with length 2 in the last dimension. The coordinates can be
+        in absolute image or normalized coordinates; if they are in absolute image
+        coordinates, `normalize` should be set to True and the original image size is required.
+
+        Returns
+            Coordinates scaled to the model resolution (i.e., in [0, resolution]),
+            which is what the SAM2 model expects.
+ """
+ if normalize:
+ assert orig_hw is not None
+ h, w = orig_hw
+ coords = coords.clone()
+ coords[..., 0] = coords[..., 0] / w
+ coords[..., 1] = coords[..., 1] / h
+
+ coords = coords * self.resolution # unnormalize coords
+ return coords
+
+ def transform_boxes(self, boxes: torch.Tensor, normalize=False, orig_hw=None) -> torch.Tensor:
+ """
+ Expects a tensor of shape Bx4. The coordinates can be in absolute image or normalized coordinates,
+ if the coords are in absolute image coordinates, normalize should be set to True and original image size is required.
+ """
+ boxes = self.transform_coords(boxes.reshape(-1, 2, 2), normalize, orig_hw)
+ return boxes
+
+ def postprocess_masks(self, masks: torch.Tensor, orig_hw) -> torch.Tensor:
+ """
+ Perform PostProcessing on output masks.
+ """
+ from sam2.utils.misc import get_connected_components
+
+ masks = masks.float()
+ input_masks = masks
+ mask_flat = masks.flatten(0, 1).unsqueeze(1) # flatten as 1-channel image
+ try:
+ if self.max_hole_area > 0:
+                # Holes are those connected components in background with area <= self.max_hole_area
+ # (background regions are those with mask scores <= self.mask_threshold)
+ labels, areas = get_connected_components(mask_flat <= self.mask_threshold)
+ is_hole = (labels > 0) & (areas <= self.max_hole_area)
+ is_hole = is_hole.reshape_as(masks)
+                # We fill holes with a positive mask score (mask_threshold + 10.0) to change them to foreground.
+ masks = torch.where(is_hole, self.mask_threshold + 10.0, masks)
+
+ if self.max_sprinkle_area > 0:
+ labels, areas = get_connected_components(mask_flat > self.mask_threshold)
+ is_hole = (labels > 0) & (areas <= self.max_sprinkle_area)
+ is_hole = is_hole.reshape_as(masks)
+                # We remove sprinkles with a negative mask score (mask_threshold - 10.0) to change them to background.
+ masks = torch.where(is_hole, self.mask_threshold - 10.0, masks)
+ except Exception as e:
+ # Skip the post-processing step if the CUDA kernel fails
+ warnings.warn(
+ f"{e}\n\nSkipping the post-processing step due to the error above. You can "
+ "still use SAM 2 and it's OK to ignore the error above, although some post-processing "
+ "functionality may be limited (which doesn't affect the results in most cases; see "
+ "https://github.com/facebookresearch/sam2/blob/main/INSTALL.md).",
+ category=UserWarning,
+ stacklevel=2,
+ )
+ masks = input_masks
+
+ masks = F.interpolate(masks, orig_hw, mode="bilinear", align_corners=False)
+ return masks
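+
+
+# NOTE: hedged usage sketch (appended here, not upstream code). SAM2Transforms
+# resizes and normalizes images for the model and maps prompts into model space:
+#
+#   transforms = SAM2Transforms(resolution=1024, mask_threshold=0.0)
+#   img_t = transforms(np_image)  # HWC uint8 image -> (3, 1024, 1024) tensor
+#   pts = transforms.transform_coords(
+#       torch.tensor([[[250.0, 120.0]]]), normalize=True, orig_hw=(480, 640)
+#   )  # absolute pixel coords -> model-resolution coords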
diff --git a/projects/PCSegSAM2/segment_t4dataset_cuboids.py b/projects/PCSegSAM2/segment_t4dataset_cuboids.py
new file mode 100644
index 00000000..c5721e0f
--- /dev/null
+++ b/projects/PCSegSAM2/segment_t4dataset_cuboids.py
@@ -0,0 +1,239 @@
+import argparse
+import concurrent.futures
+import logging
+import os
+import os.path as osp
+import re
+import warnings
+from pathlib import Path
+from typing import Any, Dict, List, Optional
+
+import numpy as np
+import yaml
+from mmengine.config import Config
+from t4_devkit import Tier4
+from t4_devkit.schema import Sample
+from tqdm import tqdm
+
+from tools.detection3d.t4dataset_converters.t4converter import (
+ extract_tier4_data,
+)
+
+
+def get_lidar_token(sample_rec: Sample) -> Optional[str]:
+ data_dict = sample_rec.data
+ if "LIDAR_TOP" in data_dict:
+ return data_dict["LIDAR_TOP"]
+ elif "LIDAR_CONCAT" in data_dict:
+ return data_dict["LIDAR_CONCAT"]
+ else:
+ return None
+
+
+def get_scene_root_dir_path(
+ root_path: str,
+ dataset_version: str,
+ scene_id: str,
+) -> str:
+ """
+    This function checks whether the scene root directory follows the new T4 Dataset
+    directory structure, which should look like `$T4DATASET_VERSION/$T4DATASET_ID/$VERSION_ID/`.
+    If the scene root directory contains version directories, it returns the path of the
+    latest version; if no version directory is found, it emits a deprecation warning and
+    returns the original `scene_root_dir_path`.
+
+ Args:
+ root_path (str): The root path of the T4 Dataset.
+ dataset_version (str): The dataset version like 'db_jpntaxi_v2'
+ scene_id: The scene id token.
+ Returns:
+ str: The updated path containing the version directory if it exists,
+ otherwise the original `scene_root_dir_path`.
+ """
+ # an integer larger than or equal to 0
+ version_pattern = re.compile(r"^\d+$")
+
+ scene_root_dir_path = osp.join(root_path, dataset_version, scene_id)
+
+ version_dirs = [d for d in os.listdir(scene_root_dir_path) if version_pattern.match(d)]
+
+ if version_dirs:
+ version_id = sorted(version_dirs, key=int)[-1]
+ return os.path.join(scene_root_dir_path, version_id)
+ else:
+ warnings.simplefilter("always")
+ warnings.warn(
+ f"The directory structure of T4 Dataset is deprecated. In the newer version, the directory structure should look something like `$T4DATASET_ID/$VERSION_ID/`. Please update your Web.Auto CLI to the latest version.",
+ DeprecationWarning,
+ )
+ return scene_root_dir_path
+
+
+def segment_pointcloud(
+ root_path: str,
+ cfg: Any,
+ segmentation_cfg: Any,
+ t4: Tier4,
+ sample: Sample,
+ i: int,
+):
+ lidar_token = get_lidar_token(sample)
+ if lidar_token is None:
+ logging.warn(f"sample {sample['token']} doesn't have lidar")
+ return
+ (
+ pose_record,
+ cs_record,
+ sd_record,
+ scene_record,
+ log_record,
+ boxes,
+ lidar_path,
+ e2g_r_mat,
+ l2e_r_mat,
+ e2g_t,
+ l2e_t,
+ ) = extract_tier4_data(t4, sample, lidar_token)
+
+ lidar_l2e_transform = np.eye(4, dtype=np.float32)
+ lidar_l2e_transform[0:3, 0:3] = l2e_r_mat
+ lidar_l2e_transform[0:3, 3] = l2e_t
+
+ cuboid_segmentation_cfg = segmentation_cfg["cuboid_segmentation"]
+ invalid_value = cuboid_segmentation_cfg["invalid_value"]
+ reset_classes = cuboid_segmentation_cfg["reset_classes"]
+ cuboid_to_segmentation_class_map = cuboid_segmentation_cfg["classes_map"]
+
+ # Load points
+ points = np.fromfile(str(lidar_path), dtype=np.float32, count=-1).reshape([-1, 5])
+ num_points = points.shape[0]
+ points_lcs = np.hstack([points[:, 0:3], np.ones((num_points, 1))])
+ points_ecs = points_lcs @ lidar_l2e_transform.T
+
+ lidar_path = Path(lidar_path)
+ basename = lidar_path.name.split(".")[0]
+ seg_path = lidar_path.parent / f"{basename}_seg.npy"
+
+ if seg_path.exists():
+ seg_pointcloud = np.load(str(seg_path)).reshape([-1])
+ else:
+ seg_pointcloud = np.full((num_points,), invalid_value, dtype=np.uint8)
+
+ if len(boxes) > 0:
+ for idx in reset_classes:
+ seg_pointcloud[seg_pointcloud == idx] = invalid_value
+
+ # NOTE(knzo25): if the segmentation is slow, this can be easily parallelized
+ for box in boxes:
+
+ center = box.position
+ rotation = box.rotation.rotation_matrix
+
+ transform = np.eye(4, dtype=np.float32)
+ transform[0:3, 0:3] = rotation
+ transform[0:3, 3] = center
+ transform = np.linalg.inv(transform)
+
+ points_box = points_ecs @ transform.T
+ shape = box.shape.size
+
+ mask = np.logical_and.reduce(
+ (
+ np.abs(points_box[:, 0]) <= 0.5 * shape[1],
+ np.abs(points_box[:, 1]) <= 0.5 * shape[0],
+ np.abs(points_box[:, 2]) <= 0.5 * shape[2],
+ )
+ )
+
+ segmentation_idx = cuboid_to_segmentation_class_map[box.semantic_label.name]
+ seg_pointcloud[mask] = segmentation_idx
+
+ try:
+ with open(str(seg_path), "wb") as f:
+ np.save(f, seg_pointcloud.astype(np.uint8))
+ except Exception as e:
+ logging.error(f"Failed to save segmentation file {str(seg_path)}: {e}")
+
+ return
+
+
+def segment_scene(args, cfg, segmentation_cfg, dataset_version, scene_id):
+
+ logging.info(f"Segmenting pointclouds from scene: {scene_id}")
+ scene_root_dir_path = get_scene_root_dir_path(
+ args.root_path,
+ dataset_version,
+ scene_id,
+ )
+
+ if not osp.isdir(scene_root_dir_path):
+ raise ValueError(f"{scene_root_dir_path} does not exist.")
+
+ t4 = Tier4(version="annotation", data_root=scene_root_dir_path, verbose=False)
+
+ for i, sample in enumerate(tqdm(t4.sample)):
+ segment_pointcloud(args.root_path, cfg, segmentation_cfg, t4, sample, i)
+
+
+def parse_args():
+ parser = argparse.ArgumentParser(description="Create data info for T4dataset")
+
+ parser.add_argument(
+ "--config",
+ type=str,
+ required=True,
+ help="config for T4dataset",
+ )
+
+ parser.add_argument(
+ "--segmentation_config",
+ type=str,
+ required=True,
+ help="config for segmentation",
+ )
+
+ parser.add_argument(
+ "--root_path",
+ type=str,
+ required=True,
+ help="specify the root path of dataset",
+ )
+
+ args = parser.parse_args()
+
+ return args
+
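+
+# NOTE: illustrative sketch of the segmentation YAML layout, inferred from the
+# keys this script reads (the concrete names and values below are assumptions):
+#
+#   cuboid_segmentation:
+#     invalid_value: 255
+#     reset_classes: [1, 2]      # labels cleared before re-painting from cuboids
+#     classes_map:               # cuboid semantic label -> segmentation index
+#       vehicle.car: 1
+#   projective_segmentation:
+#     num_workers: 8
+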
+
+def main():
+ args = parse_args()
+
+ # load config
+ cfg = Config.fromfile(args.config)
+
+ logging.basicConfig(level=logging.INFO)
+
+ # TODO(knzo25): hack since I only want to test part of the db
+ cfg.dataset_version_list = ["db_jpntaxi_v2"]
+
+ with open(args.segmentation_config, "r") as f:
+ segmentation_cfg = yaml.safe_load(f)
+
+ num_workers = segmentation_cfg["projective_segmentation"]["num_workers"]
+
+ for dataset_version in cfg.dataset_version_list:
+ dataset_list = osp.join(cfg.dataset_version_config_root, dataset_version + ".yaml")
+ with open(dataset_list, "r") as f:
+ dataset_list_dict: Dict[str, List[str]] = yaml.safe_load(f)
+
+ for split in ["train", "val", "test"]:
+ logging.info(f"Segmenting split: {split}")
+
+ with concurrent.futures.ThreadPoolExecutor(max_workers=num_workers) as executor:
+                # consume the iterator so exceptions raised in workers are re-raised here
+                list(
+                    executor.map(
+                        lambda scene_id: segment_scene(args, cfg, segmentation_cfg, dataset_version, scene_id),
+                        dataset_list_dict.get(split, []),
+                    )
+                )
+
+
+if __name__ == "__main__":
+ main()
diff --git a/projects/PCSegSAM2/segment_t4dataset_projective.py b/projects/PCSegSAM2/segment_t4dataset_projective.py
new file mode 100644
index 00000000..a09500e5
--- /dev/null
+++ b/projects/PCSegSAM2/segment_t4dataset_projective.py
@@ -0,0 +1,379 @@
+import argparse
+import concurrent.futures
+import logging
+import os
+import os.path as osp
+import re
+import warnings
+from pathlib import Path
+from typing import Any, Dict, List, Optional
+
+import cv2
+import numpy as np
+import yaml
+from mmengine.config import Config
+from skimage.morphology import binary_dilation, square
+from skimage.segmentation import find_boundaries
+from t4_devkit import Tier4
+from t4_devkit.schema import CalibratedSensor, EgoPose, Sample, SampleData
+from tqdm import tqdm
+
+from tools.detection3d.t4dataset_converters.t4converter import (
+ extract_tier4_data,
+)
+
+
+def get_lidar_token(sample_rec: Sample) -> Optional[str]:
+ data_dict = sample_rec.data
+ if "LIDAR_TOP" in data_dict:
+ return data_dict["LIDAR_TOP"]
+ elif "LIDAR_CONCAT" in data_dict:
+ return data_dict["LIDAR_CONCAT"]
+ else:
+ return None
+
+
+def get_scene_root_dir_path(
+ root_path: str,
+ dataset_version: str,
+ scene_id: str,
+) -> str:
+ """
+    This function checks whether the scene root directory follows the new T4 Dataset
+    directory structure, which should look like `$T4DATASET_VERSION/$T4DATASET_ID/$VERSION_ID/`.
+    If the scene root directory contains version directories, it returns the path of the
+    latest version; if no version directory is found, it emits a deprecation warning and
+    returns the original `scene_root_dir_path`.
+
+ Args:
+ root_path (str): The root path of the T4 Dataset.
+ dataset_version (str): The dataset version like 'db_jpntaxi_v2'
+ scene_id: The scene id token.
+ Returns:
+ str: The updated path containing the version directory if it exists,
+ otherwise the original `scene_root_dir_path`.
+ """
+ # an integer larger than or equal to 0
+ version_pattern = re.compile(r"^\d+$")
+
+ scene_root_dir_path = osp.join(root_path, dataset_version, scene_id)
+
+ version_dirs = [d for d in os.listdir(scene_root_dir_path) if version_pattern.match(d)]
+
+ if version_dirs:
+ version_id = sorted(version_dirs, key=int)[-1]
+ return os.path.join(scene_root_dir_path, version_id)
+ else:
+ warnings.simplefilter("always")
+ warnings.warn(
+ f"The directory structure of T4 Dataset is deprecated. In the newer version, the directory structure should look something like `$T4DATASET_ID/$VERSION_ID/`. Please update your Web.Auto CLI to the latest version.",
+ DeprecationWarning,
+ )
+ return scene_root_dir_path
+
+
+def segment_pointcloud(
+ root_path: str,
+ cfg: Any,
+ segmentation_cfg: Any,
+ t4: Tier4,
+ sample: Sample,
+ i: int,
+):
+ lidar_token = get_lidar_token(sample)
+ if lidar_token is None:
+ logging.warn(f"sample {sample['token']} doesn't have lidar")
+ return
+ (
+ pose_record,
+ cs_record,
+ sd_record,
+ scene_record,
+ log_record,
+ boxes,
+ lidar_path,
+ e2g_r_mat,
+ l2e_r_mat,
+ e2g_t,
+ l2e_t,
+ ) = extract_tier4_data(t4, sample, lidar_token)
+
+ lidar_l2e_transform = np.eye(4, dtype=np.float32)
+ lidar_l2e_transform[0:3, 0:3] = l2e_r_mat
+ lidar_l2e_transform[0:3, 3] = l2e_t
+
+ lidar_e2g_transform = np.eye(4, dtype=np.float32)
+ lidar_e2g_transform[0:3, 0:3] = e2g_r_mat
+ lidar_e2g_transform[0:3, 3] = e2g_t
+
+ camera_types = cfg.camera_types
+    cam_data: List[List[Any]] = []  # per frame: (cam name, image path, g2e, e2c, cam2img)
+ assert len(camera_types) > 0
+
+ projective_segmentation_cfg = segmentation_cfg["projective_segmentation"]
+ num_consistent_frames = projective_segmentation_cfg["num_consistent_frames"]
+
+ for cam in camera_types:
+ if cam not in sample.data:
+ continue
+
+ cam_token = sample.data[cam]
+
+ num_past_frames = num_consistent_frames // 2
+
+ for _ in range(num_past_frames):
+ sd_record: SampleData = t4.get("sample_data", cam_token)
+
+ if sd_record.prev != "":
+ cam_token = sd_record.prev
+
+ for _ in range(num_consistent_frames):
+
+ sd_record: SampleData = t4.get("sample_data", cam_token)
+ cs_record: CalibratedSensor = t4.get("calibrated_sensor", sd_record.calibrated_sensor_token)
+ pose_record: EgoPose = t4.get("ego_pose", sd_record.ego_pose_token)
+
+ cam_path, boxes, cam_intrinsics = t4.get_sample_data(cam_token)
+
+ c2e_t = cs_record.translation
+ e2g_t = pose_record.translation
+ c2e_r = cs_record.rotation
+ e2g_r = pose_record.rotation
+ c2e_r_mat = c2e_r.rotation_matrix
+ e2g_r_mat = e2g_r.rotation_matrix
+
+ c2e_transform = np.eye(4, dtype=np.float32)
+ c2e_transform[0:3, 0:3] = c2e_r_mat
+ c2e_transform[0:3, 3] = c2e_t
+
+ e2g_transform = np.eye(4, dtype=np.float32)
+ e2g_transform[0:3, 0:3] = e2g_r_mat
+ e2g_transform[0:3, 3] = e2g_t
+
+ cam2_img_transform = np.eye(4, dtype=np.float32)
+ cam2_img_transform[0:3, 0:3] = cam_intrinsics
+
+ cam_data.append(
+ [cam, cam_path, np.linalg.inv(e2g_transform), np.linalg.inv(c2e_transform), cam2_img_transform]
+ )
+
+ if sd_record.next == "":
+ break
+
+ cam_token = sd_record.next
+
+ # Load points
+ points = np.fromfile(str(lidar_path), dtype=np.float32, count=-1).reshape([-1, 5])
+ num_points = points.shape[0]
+ points_lcs = np.hstack([points[:, 0:3], np.ones((num_points, 1))])
+
+ points_ecs = points_lcs @ lidar_l2e_transform.T
+ points_gcs = points_ecs @ lidar_e2g_transform.T
+
+ # Load segmented images
+ seg_pointcloud_list = []
+ on_img_mask_list = []
+
+ background_value = projective_segmentation_cfg["background_value"]
+ invalid_value = projective_segmentation_cfg["invalid_value"]
+
+ fill_boundaries_with_invalid = projective_segmentation_cfg["fill_boundaries_with_invalid"]
+ fill_boundaries_width = projective_segmentation_cfg["fill_boundaries_width"]
+
+ mapping_dict = get_class_mapping(segmentation_cfg)
+
+ for cam, img_path, g2e_transform, e2c_transform, cam2img_transform in cam_data:
+
+ img_path = Path(img_path)
+ seg_img_path = img_path.with_name(img_path.stem + "_seg.png")
+ seg_image = cv2.imread(str(seg_img_path), cv2.IMREAD_GRAYSCALE).astype(np.int32)
+ h, w = seg_image.shape
+
+ # Mapping from SAM2 classes to lidar segmentation classes
+ seg_image = np.vectorize(mapping_dict.__getitem__)(seg_image)
+
+        # There are cases where SAM2 fills the whole image with a single class.
+        # Skip those for now.
+        if seg_image.min() == seg_image.max():
+            logging.warning(f"class {seg_image.min()} filled the whole image; potential error ({str(img_path)})")
+ continue
+
+ if fill_boundaries_with_invalid:
+
+ boundaries = find_boundaries(seg_image, mode="inner", connectivity=1)
+ selem = square(fill_boundaries_width)
+ boundaries = binary_dilation(boundaries, selem)
+ seg_image[boundaries] = background_value
+
+ points_ecs = points_gcs @ g2e_transform.T
+
+ points_ccs = points_ecs @ e2c_transform.T
+
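+        # Project into image coordinates and apply the perspective division
+        # (divide x, y by depth).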
+ points_ics = points_ccs @ cam2img_transform.T
+ points_ics[:, 0:2] /= points_ics[:, 2:3]
+
+        # Strict upper bounds: a coordinate equal to w (or h) would index one
+        # past the image border after the int cast below.
+        on_img_mask = np.logical_and.reduce(
+            (
+                points_ics[:, 0] > 0,
+                points_ics[:, 0] < w,
+                points_ics[:, 1] > 0,
+                points_ics[:, 1] < h,
+                points_ics[:, 2] > 0,
+            )
+        )
+
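+        # Look up the segmentation label at each projected pixel; points that
+        # fall outside this image keep the sentinel value -1.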
+ seg_pointcloud = np.full((num_points,), -1, dtype=np.int32)
+ seg_pointcloud[on_img_mask] = seg_image[
+ points_ics[on_img_mask, 1].astype(np.int32), points_ics[on_img_mask, 0].astype(np.int32)
+ ]
+
+ on_img_mask_list.append(on_img_mask)
+ seg_pointcloud_list.append(seg_pointcloud)
+
+ # Stack all the segmented points and masks
+ seg_pointcloud = np.stack(seg_pointcloud_list, axis=0)
+    seg_pointcloud[seg_pointcloud == -1] = invalid_value
+ on_img_mask = np.stack(on_img_mask_list, axis=0)
+
+ # Create a masked array
+ on_img_non_bg_mask = np.logical_and(on_img_mask, seg_pointcloud != background_value)
+
+ seg_pointcloud_non_bg_masked = np.ma.masked_array(seg_pointcloud, mask=~on_img_non_bg_mask)
+
+    # Consistency check: a point keeps its label only if the minimum and the
+    # maximum label over all views that observe it agree.
+ seg_pointcloud_non_bg_masked_min = np.ma.min(seg_pointcloud_non_bg_masked, axis=0)
+ seg_pointcloud_non_bg_masked_max = np.ma.max(seg_pointcloud_non_bg_masked, axis=0)
+ seg_pointcloud_non_bg_valid = seg_pointcloud_non_bg_masked_min == seg_pointcloud_non_bg_masked_max
+ seg_pointcloud_non_bg_valid.set_fill_value(False)
+ seg_pointcloud_non_bg_valid = seg_pointcloud_non_bg_valid.filled()
+
+ seg_pointcloud_combined = np.full((num_points,), invalid_value, dtype=np.uint8)
+ seg_pointcloud_combined[seg_pointcloud_non_bg_valid] = seg_pointcloud_non_bg_masked_max[
+ seg_pointcloud_non_bg_valid
+ ]
+
+    # Dummy ground filter to keep vehicles and other classes from leaking into
+    # the ground. This may cause small objects to be misclassified, but this
+    # is just a test.
+ ground_value = projective_segmentation_cfg["ground_value"]
+ min_non_ground_z = projective_segmentation_cfg["min_non_ground_z"]
+
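+    # The height check is done in the ego frame, where z is the height
+    # relative to the ego vehicle's base.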
+ points_ecs = points_lcs @ lidar_l2e_transform.T
+ update_ground_mask = np.logical_and.reduce(
+ (
+ seg_pointcloud_combined != invalid_value,
+ seg_pointcloud_combined != ground_value,
+ points_ecs[:, 2] <= min_non_ground_z,
+ )
+ )
+
+ seg_pointcloud_combined[update_ground_mask] = invalid_value
+
+ lidar_path = Path(lidar_path)
+ basename = lidar_path.name.split(".")[0]
+ seg_path = lidar_path.parent / f"{basename}_seg.npy"
+
+ with open(seg_path, "wb") as f:
+ np.save(f, seg_pointcloud_combined)
+
+ return
+
+
+def get_class_mapping(cfg: Any) -> Dict[int, int]:
+
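+    # Example with hypothetical config values: given
+    # sam2_classes = ["car", "pedestrian"] and classes_map = {"car": 3, "pedestrian": 5},
+    # the returned dict maps SAM2 index 0 -> 3 and 1 -> 5, plus the
+    # background value of one config to the background value of the other.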
+ sam2_cfg = cfg["sam2"]
+ projective_segmentation_cfg = cfg["projective_segmentation"]
+
+ mapping_dict = {}
+ mapping_dict[sam2_cfg["background_value"]] = projective_segmentation_cfg["background_value"]
+
+ sam2_class_to_idx = {}
+
+ for i, class_name in enumerate(sam2_cfg["sam2_classes"]):
+ sam2_class_to_idx[class_name] = i
+
+ for class_name, segmentation_idx in projective_segmentation_cfg["classes_map"].items():
+ mapping_dict[sam2_class_to_idx[class_name]] = segmentation_idx
+
+ return mapping_dict
+
+
+def segment_scene(args, cfg, segmentation_cfg, dataset_version, scene_id):
+
+ logging.info(f"Segmenting pointclouds from scene: {scene_id}")
+ scene_root_dir_path = get_scene_root_dir_path(
+ args.root_path,
+ dataset_version,
+ scene_id,
+ )
+
+ if not osp.isdir(scene_root_dir_path):
+ raise ValueError(f"{scene_root_dir_path} does not exist.")
+
+ t4 = Tier4(version="annotation", data_root=scene_root_dir_path, verbose=False)
+
+ for i, sample in enumerate(tqdm(t4.sample)):
+ segment_pointcloud(args.root_path, cfg, segmentation_cfg, t4, sample, i)
+
+
+def parse_args():
+    parser = argparse.ArgumentParser(description="Project SAM2 segmentation masks onto T4dataset pointclouds")
+
+ parser.add_argument(
+ "--config",
+ type=str,
+ required=True,
+ help="config for T4dataset",
+ )
+
+ parser.add_argument(
+ "--segmentation_config",
+ type=str,
+ required=True,
+ help="config for segmentation",
+ )
+
+ parser.add_argument(
+ "--root_path",
+ type=str,
+ required=True,
+ help="specify the root path of dataset",
+ )
+
+ args = parser.parse_args()
+
+ return args
+
+
+def main():
+ args = parse_args()
+
+ # load config
+ cfg = Config.fromfile(args.config)
+
+ logging.basicConfig(level=logging.INFO)
+
+ # TODO(knzo25): hack since I only want to test part of the db
+ cfg.dataset_version_list = ["db_jpntaxi_v2"]
+
+ with open(args.segmentation_config, "r") as f:
+ segmentation_cfg = yaml.safe_load(f)
+
+ num_workers = segmentation_cfg["projective_segmentation"]["num_workers"]
+
+ for dataset_version in cfg.dataset_version_list:
+ dataset_list = osp.join(cfg.dataset_version_config_root, dataset_version + ".yaml")
+ with open(dataset_list, "r") as f:
+ dataset_list_dict: Dict[str, List[str]] = yaml.safe_load(f)
+
+ for split in ["train", "val", "test"]:
+ logging.info(f"Segmenting split: {split}")
+
+            with concurrent.futures.ThreadPoolExecutor(max_workers=num_workers) as executor:
+                # Consume the iterator so exceptions raised in the workers are
+                # re-raised here instead of being silently dropped.
+                list(
+                    executor.map(
+                        lambda scene_id: segment_scene(args, cfg, segmentation_cfg, dataset_version, scene_id),
+                        dataset_list_dict.get(split, []),
+                    )
+                )
+
+
+if __name__ == "__main__":
+ main()
diff --git a/projects/PCSegSAM2/segment_t4dataset_sam2.py b/projects/PCSegSAM2/segment_t4dataset_sam2.py
new file mode 100644
index 00000000..2b13b5ed
--- /dev/null
+++ b/projects/PCSegSAM2/segment_t4dataset_sam2.py
@@ -0,0 +1,335 @@
+import argparse
+import os
+import os.path as osp
+import re
+import warnings
+from collections import defaultdict
+from pathlib import Path
+from typing import Dict, List
+
+import cv2
+import hydra
+import numpy as np
+import supervision as sv
+import torch
+import yaml
+from groundingdino.util.inference import load_image, load_model, predict
+from hydra import initialize
+from mmengine.config import Config
+from mmengine.logging import print_log
+from sam2.build_sam import build_sam2
+from sam2.sam2_image_predictor import SAM2ImagePredictor
+from t4_devkit import Tier4
+from torchvision.ops import box_convert
+from tqdm import tqdm
+
+
+class SAM2Wrapper:
+
+ def __init__(self, cfg):
+
+ self.cfg = cfg
+ self.sam2_classes = self.cfg["sam2_classes"]
+ self.text_prompt = ". ".join(self.sam2_classes) + "."
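+        # Grounding DINO expects a single caption with the classes separated
+        # by ". " and a trailing "." (e.g. "car. truck. pedestrian.").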
+
+ self.sam2_checkpoint = self.cfg["sam2_checkpoint"]
+ self.sam2_cfg = self.cfg["sam2_cfg"]
+ self.grounding_dino_checkpoint = self.cfg["grounding_dino_checkpoint"]
+ self.grounding_dino_cfg = self.cfg["grounding_dino_cfg"]
+ self.background_value = self.cfg["background_value"]
+
+ self.box_threshold = self.cfg["box_threshold"]
+ self.text_threshold = self.cfg["text_threshold"]
+ self.device = "cuda" if torch.cuda.is_available() else "cpu"
+
+ # build SAM2 image predictor
+
+ hydra.core.global_hydra.GlobalHydra.instance().clear()
+ config_dir = ""
+ with initialize(config_path=config_dir):
+ self.sam2_model = build_sam2(self.sam2_cfg, self.sam2_checkpoint, device=self.device)
+ self.sam2_predictor = SAM2ImagePredictor(self.sam2_model)
+
+ # build grounding dino model
+ self.grounding_model = load_model(
+ model_config_path=self.grounding_dino_cfg,
+ model_checkpoint_path=self.grounding_dino_checkpoint,
+ device=self.device,
+ )
+
+ def get_best_label(self, sam2_label: str, sam2_classes: List[str]):
+
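+        # Grounding DINO can return phrases that merge several prompt words
+        # (e.g. "car truck"); return the shortest leading word sequence that
+        # exactly matches one of the configured classes, or "" if none does.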
+ sam2_label_list = sam2_label.split(" ")
+
+ for i in range(len(sam2_label_list)):
+ candidate = " ".join(sam2_label_list[0 : i + 1])
+ if candidate in sam2_classes:
+ return candidate
+
+ return ""
+
+ def segment(self, img_path, override):
+
+ img_path = Path(img_path)
+ seg_img_path = img_path.with_name(img_path.stem + "_seg.png")
+ anno_img_path = img_path.with_name(img_path.stem + "_anno.jpg")
+
+ if seg_img_path.exists() and not override:
+ return None
+
+ image_source, image = load_image(str(img_path))
+
+ self.sam2_predictor.set_image(image_source)
+
+ boxes, confidences, labels = predict(
+ model=self.grounding_model,
+ image=image,
+ caption=self.text_prompt,
+ box_threshold=self.box_threshold,
+ text_threshold=self.text_threshold,
+ )
+
+        # Grounding DINO returns normalized (cx, cy, w, h) boxes; scale them
+        # to pixels and convert to (x1, y1, x2, y2) for SAM 2's box prompt.
+ h, w, _ = image_source.shape
+ boxes = boxes * torch.Tensor([w, h, w, h])
+ input_boxes = box_convert(boxes=boxes, in_fmt="cxcywh", out_fmt="xyxy").numpy()
+
+        # FIXME: figure out how this autocast influences the G-DINO model
+ with torch.autocast(device_type="cuda", dtype=torch.bfloat16):
+
+ if torch.cuda.get_device_properties(0).major >= 8:
+ # turn on tfloat32 for Ampere GPUs (https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices)
+ torch.backends.cuda.matmul.allow_tf32 = True
+ torch.backends.cudnn.allow_tf32 = True
+
+ if len(input_boxes) > 0:
+
+ masks, scores, logits = self.sam2_predictor.predict(
+ point_coords=None,
+ point_labels=None,
+ box=input_boxes,
+ multimask_output=False,
+ )
+
+ else:
+
+ masks = np.array([]).reshape(0, h, w)
+ scores = []
+ logits = []
+
+ # Class image creation
+
+ class_image = np.full((h, w, 1), self.background_value, dtype=np.uint8)
+ label_to_class_idx = {}
+
+ for idx, label in enumerate(self.sam2_classes):
+ label_to_class_idx[label] = idx
+
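+        # Paint instances in reverse order; assuming detections come back
+        # sorted by confidence (not guaranteed), higher-confidence masks are
+        # drawn last and therefore win overlapping pixels.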
+ for instance_idx in reversed(range(len(confidences))):
+ instance_label = self.get_best_label(labels[instance_idx], self.sam2_classes)
+
+ if instance_label in label_to_class_idx:
+ class_idx = label_to_class_idx[instance_label]
+ else:
+ print(f"Unrecognized label: {labels[instance_idx]}")
+ continue
+
+ mask = masks[instance_idx].squeeze().astype(np.bool_)
+
+ class_image[mask] = class_idx
+
+ cv2.imwrite(str(seg_img_path), class_image)
+
+ # convert the shape to (n, H, W)
+ if masks.ndim == 4:
+ masks = masks.squeeze(1)
+
+ confidences = confidences.numpy().tolist()
+ class_names = labels
+
+ class_ids = np.array(list(range(len(class_names))))
+
+ labels = [f"{class_name} {confidence:.2f}" for class_name, confidence in zip(class_names, confidences)]
+
+        img = cv2.imread(str(img_path))
+ detections = sv.Detections(
+ xyxy=input_boxes, mask=masks.astype(bool), class_id=class_ids # (n, 4) # (n, h, w)
+ )
+
+ box_annotator = sv.BoxAnnotator()
+ annotated_frame = box_annotator.annotate(scene=img.copy(), detections=detections)
+
+ label_annotator = sv.LabelAnnotator()
+ annotated_frame = label_annotator.annotate(scene=annotated_frame, detections=detections, labels=labels)
+
+ mask_annotator = sv.MaskAnnotator()
+ annotated_frame = mask_annotator.annotate(scene=annotated_frame, detections=detections)
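+        # Low JPEG quality (40) keeps the debug overlays small on disk.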
+ cv2.imwrite(str(anno_img_path), annotated_frame, [cv2.IMWRITE_JPEG_QUALITY, 40])
+
+ return annotated_frame
+
+
+def get_scene_root_dir_path(
+ root_path: str,
+ dataset_version: str,
+ scene_id: str,
+) -> str:
+ """
+ This function checks if the provided `scene_root_dir_path` follows the new directory structure
+ of the T4 Dataset, which should look like `$T4DATASET_VERSION/$T4DATASET_ID/$VERSION_ID/`.
+ If the `scene_root_dir_path` does contain a version directory, it searches for the latest version directory
+ under the `scene_root_dir_path` and returns the updated path.
+ If no version directory is found, it prints a deprecation warning and returns the original `scene_root_dir_path`.
+
+ Args:
+ root_path (str): The root path of the T4 Dataset.
+ dataset_version (str): The dataset version like 'db_jpntaxi_v2'
+ scene_id: The scene id token.
+ Returns:
+ str: The updated path containing the version directory if it exists,
+ otherwise the original `scene_root_dir_path`.
+ """
+ # an integer larger than or equal to 0
+ version_pattern = re.compile(r"^\d+$")
+
+ scene_root_dir_path = osp.join(root_path, dataset_version, scene_id)
+
+ version_dirs = [d for d in os.listdir(scene_root_dir_path) if version_pattern.match(d)]
+
+ if version_dirs:
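+        # Pick the numerically largest version directory (e.g. "10" > "9").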
+ version_id = sorted(version_dirs, key=int)[-1]
+ return os.path.join(scene_root_dir_path, version_id)
+ else:
+ warnings.simplefilter("always")
+ warnings.warn(
+ f"The directory structure of T4 Dataset is deprecated. In the newer version, the directory structure should look something like `$T4DATASET_ID/$VERSION_ID/`. Please update your Web.Auto CLI to the latest version.",
+ DeprecationWarning,
+ )
+ return scene_root_dir_path
+
+
+def parse_args():
+    parser = argparse.ArgumentParser(description="Segment T4dataset images with Grounding DINO + SAM2")
+
+ parser.add_argument(
+ "--dataset_config",
+ type=str,
+ required=True,
+ help="config for T4dataset",
+ )
+
+ parser.add_argument(
+ "--segmentation_config",
+ type=str,
+ required=True,
+ help="config for sam2 + grounding dino",
+ )
+
+ parser.add_argument(
+ "--root_path",
+ type=str,
+ required=True,
+ help="specify the root path of dataset",
+ )
+
+ parser.add_argument(
+ "--out_videos",
+ type=str,
+ required=True,
+ help="directory to save segmented videos",
+ )
+
+ args = parser.parse_args()
+ return args
+
+
+def make_video(video_folder, scene_id, cam_name, images):
+
+    if len(images) == 0:
+        print("No images to write; the scene may have been processed already.")
+ return
+
+ height, width, layers = images[0].shape
+
+ # Define output video settings
+ output_file = Path(video_folder) / f"{scene_id}_{cam_name}.mp4"
+ fps = 2 # frames per second
+ fourcc = cv2.VideoWriter_fourcc(*"mp4v")
+
+ # Create the video writer
+    video_writer = cv2.VideoWriter(str(output_file), fourcc, fps, (width // 2, height // 2))
+
+ # Write each image to the video
+ for image in images:
+ image = cv2.resize(image, (width // 2, height // 2))
+ video_writer.write(image)
+
+ video_writer.release()
+ print(f"Video created successfully: {output_file}")
+
+
+def main():
+ args = parse_args()
+
+ # load config
+ dataset_cfg = Config.fromfile(args.dataset_config)
+ os.makedirs(args.out_videos, exist_ok=True)
+
+ # load config
+ with open(args.segmentation_config, "r") as f:
+ segmentation_cfg = yaml.safe_load(f)["sam2"]
+
+ model = SAM2Wrapper(segmentation_cfg)
+
+ for dataset_version in tqdm(dataset_cfg.dataset_version_list):
+ dataset_list = osp.join(dataset_cfg.dataset_version_config_root, dataset_version + ".yaml")
+ with open(dataset_list, "r") as f:
+ dataset_list_dict: Dict[str, List[str]] = yaml.safe_load(f)
+
+ for split in tqdm(["train", "val", "test"]):
+ print_log(f"Segmenting images from split: {split}", logger="current")
+ for scene_id in tqdm(dataset_list_dict.get(split, [])):
+ print_log(f"Segmented images from scene: {scene_id}")
+ scene_root_dir_path = get_scene_root_dir_path(
+ args.root_path,
+ dataset_version,
+ scene_id,
+ )
+
+ if not osp.isdir(scene_root_dir_path):
+ raise ValueError(f"{scene_root_dir_path} does not exist.")
+
+ t4 = Tier4(version="annotation", data_root=scene_root_dir_path, verbose=False)
+ scene_seg_images_dict = defaultdict(list)
+
+ for i, sample_data in enumerate(tqdm(t4.sample_data)):
+
+ if sample_data.fileformat not in ("jpg", "png") or (
+ segmentation_cfg["only_key_frames"] and not sample_data.is_key_frame
+ ):
+ continue
+
+ cam_name = sample_data.channel
+
+ seg_img = model.segment(
+ os.path.join(scene_root_dir_path, sample_data.filename), segmentation_cfg["override"]
+ )
+
+ if seg_img is None:
+ continue
+
+ scene_seg_images_dict[cam_name].append(seg_img)
+
+ for cam_name, images in scene_seg_images_dict.items():
+ make_video(args.out_videos, scene_id, cam_name, images)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/projects/PCSegSAM2/setup.py b/projects/PCSegSAM2/setup.py
new file mode 100644
index 00000000..c67a949f
--- /dev/null
+++ b/projects/PCSegSAM2/setup.py
@@ -0,0 +1,174 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+import os
+
+from setuptools import find_packages, setup
+
+# Package metadata
+NAME = "SAM-2"
+VERSION = "1.0"
+DESCRIPTION = "SAM 2: Segment Anything in Images and Videos"
+URL = "https://github.com/facebookresearch/sam2"
+AUTHOR = "Meta AI"
+AUTHOR_EMAIL = "segment-anything@meta.com"
+LICENSE = "Apache 2.0"
+
+# Read the contents of README file
+with open("README.md", "r", encoding="utf-8") as f:
+ LONG_DESCRIPTION = f.read()
+
+# Required dependencies
+REQUIRED_PACKAGES = [
+ "torch>=2.3.1",
+ "torchvision>=0.18.1",
+ "numpy>=1.24.4",
+ "tqdm>=4.66.1",
+ "hydra-core>=1.3.2",
+ "iopath>=0.1.10",
+ "pillow>=9.4.0",
+]
+
+EXTRA_PACKAGES = {
+ "notebooks": [
+ "matplotlib>=3.9.1",
+ "jupyter>=1.0.0",
+ "opencv-python>=4.7.0",
+ "eva-decord>=0.6.1",
+ ],
+ "interactive-demo": [
+ "Flask>=3.0.3",
+ "Flask-Cors>=5.0.0",
+ "av>=13.0.0",
+ "dataclasses-json>=0.6.7",
+ "eva-decord>=0.6.1",
+ "gunicorn>=23.0.0",
+ "imagesize>=1.4.1",
+ "pycocotools>=2.0.8",
+ "strawberry-graphql>=0.243.0",
+ ],
+ "dev": [
+ "black==24.2.0",
+ "usort==1.0.2",
+ "ufmt==2.0.0b2",
+ "fvcore>=0.1.5.post20221221",
+ "pandas>=2.2.2",
+ "scikit-image>=0.24.0",
+ "tensorboard>=2.17.0",
+ "pycocotools>=2.0.8",
+ "tensordict>=0.5.0",
+ "opencv-python>=4.7.0",
+ "submitit>=1.5.1",
+ ],
+}
+
+# By default, we also build the SAM 2 CUDA extension.
+# You may turn off CUDA build with `export SAM2_BUILD_CUDA=0`.
+BUILD_CUDA = os.getenv("SAM2_BUILD_CUDA", "1") == "1"
+# By default, we allow SAM 2 installation to proceed even with build errors.
+# You may force stopping on errors with `export SAM2_BUILD_ALLOW_ERRORS=0`.
+BUILD_ALLOW_ERRORS = os.getenv("SAM2_BUILD_ALLOW_ERRORS", "1") == "1"
+
+# Catch and skip errors during extension building and print a warning message
+# (note that this message only shows up under verbose build mode
+# "pip install -v -e ." or "python setup.py build_ext -v")
+CUDA_ERROR_MSG = (
+ "{}\n\n"
+ "Failed to build the SAM 2 CUDA extension due to the error above. "
+ "You can still use SAM 2 and it's OK to ignore the error above, although some "
+ "post-processing functionality may be limited (which doesn't affect the results in most cases; "
+ "(see https://github.com/facebookresearch/sam2/blob/main/INSTALL.md).\n"
+)
+
+
+def get_extensions():
+ if not BUILD_CUDA:
+ return []
+
+ try:
+ from torch.utils.cpp_extension import CUDAExtension
+
+ srcs = ["sam2/csrc/connected_components.cu"]
+ compile_args = {
+ "cxx": [],
+ "nvcc": [
+ "-DCUDA_HAS_FP16=1",
+ "-D__CUDA_NO_HALF_OPERATORS__",
+ "-D__CUDA_NO_HALF_CONVERSIONS__",
+ "-D__CUDA_NO_HALF2_OPERATORS__",
+ ],
+ }
+ ext_modules = [CUDAExtension("sam2._C", srcs, extra_compile_args=compile_args)]
+ except Exception as e:
+ if BUILD_ALLOW_ERRORS:
+ print(CUDA_ERROR_MSG.format(e))
+ ext_modules = []
+ else:
+ raise e
+
+ return ext_modules
+
+
+try:
+ from torch.utils.cpp_extension import BuildExtension
+
+ class BuildExtensionIgnoreErrors(BuildExtension):
+
+ def finalize_options(self):
+ try:
+ super().finalize_options()
+ except Exception as e:
+ print(CUDA_ERROR_MSG.format(e))
+ self.extensions = []
+
+ def build_extensions(self):
+ try:
+ super().build_extensions()
+ except Exception as e:
+ print(CUDA_ERROR_MSG.format(e))
+ self.extensions = []
+
+ def get_ext_filename(self, ext_name):
+ try:
+ return super().get_ext_filename(ext_name)
+ except Exception as e:
+ print(CUDA_ERROR_MSG.format(e))
+ self.extensions = []
+ return "_C.so"
+
+ cmdclass = {
+ "build_ext": (
+ BuildExtensionIgnoreErrors.with_options(no_python_abi_suffix=True)
+ if BUILD_ALLOW_ERRORS
+ else BuildExtension.with_options(no_python_abi_suffix=True)
+ )
+ }
+except Exception as e:
+ cmdclass = {}
+ if BUILD_ALLOW_ERRORS:
+ print(CUDA_ERROR_MSG.format(e))
+ else:
+ raise e
+
+
+# Setup configuration
+setup(
+ name=NAME,
+ version=VERSION,
+ description=DESCRIPTION,
+ long_description=LONG_DESCRIPTION,
+ long_description_content_type="text/markdown",
+ url=URL,
+ author=AUTHOR,
+ author_email=AUTHOR_EMAIL,
+ license=LICENSE,
+    packages=find_packages(exclude=("notebooks",)),  # exclude expects an iterable of patterns, not a bare string
+ include_package_data=True,
+ install_requires=REQUIRED_PACKAGES,
+ extras_require=EXTRA_PACKAGES,
+ python_requires=">=3.10.0",
+ ext_modules=get_extensions(),
+ cmdclass=cmdclass,
+)