{"payload":{"pageCount":2,"repositories":[{"type":"Public","name":"UNav-Server","owner":"ai4ce","isFork":false,"description":"","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":2,"issueCount":2,"starsCount":0,"forksCount":0,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-09-21T10:53:33.484Z"}},{"type":"Public","name":"realsense_ROS2_interface","owner":"ai4ce","isFork":false,"description":"This package interfaces the robot/joystick with the realsense ROS2 wrapper","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":0,"license":null,"participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,5,1,2,5,0,0,3,2,3,1,4],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-09-20T00:30:46.367Z"}},{"type":"Public","name":"joy_hand_eye_ROS2","owner":"ai4ce","isFork":false,"description":"This minimal package can be used to perform hand-eye-calibration in a ROS2 environment with a joystick","allTopics":["robotics","hand-eye-calibration","ros2"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":1,"starsCount":0,"forksCount":0,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-09-20T00:30:15.104Z"}},{"type":"Public","name":"ur_ros2","owner":"ai4ce","isFork":false,"description":"This package is some additions to the official UR ROS2 driver that enables teleoperation and some more visualization. Developed at AI4CE Lab at NYU.","allTopics":["robotics","universal-robots","ros2","teleoperation"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":1,"starsCount":0,"forksCount":0,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-09-18T21:03:07.941Z"}},{"type":"Public","name":"xarm_ros2","owner":"ai4ce","isFork":false,"description":"This package is the official xarm ROS2 package with modifications from AI4CE lab at NYU","allTopics":["robotics","ros2","teleoperation","xarm","ufactory"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":0,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-09-11T00:46:23.500Z"}},{"type":"Public","name":"SSCBench","owner":"ai4ce","isFork":false,"description":"SSCBench: A Large-Scale 3D Semantic Scene Completion Benchmark for Autonomous Driving","allTopics":["machine-learning","computer-vision","artificial-intelligence","dataset","autonomous-driving","autonomous-vehicles","occupancy-grid-map","semantic-scene-understanding","2d-to-3d","3d-perception","semantic-scene-completion","3d-scene-understanding"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":10,"starsCount":165,"forksCount":11,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-09-10T15:30:48.954Z"}},{"type":"Public","name":"insta360_ros_driver","owner":"ai4ce","isFork":false,"description":"A ROS driver for Insta360 cameras, enabling real-time image capture, processing, and publishing in ROS environments.","allTopics":["driver","ros","webcam","360-camera","insta360","insta360-one-x2","insta360-one-x3","insta360-sdk"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":2,"starsCount":20,"forksCount":1,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-08-29T17:46:07.708Z"}},{"type":"Public","name":"usbcam_ROS2_interface","owner":"ai4ce","isFork":false,"description":"This package interfaces the robot/joystick with a USB cam","allTopics":["robotics","ros2","usbcamera"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":0,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-08-18T08:29:25.302Z"}},{"type":"Public","name":"ai4ce_robot_ROS2_drivers","owner":"ai4ce","isFork":false,"description":"This repo contains all the ROS2 driver packages modified at AI4CE lab for working with various robots","allTopics":["robotics","universal-robots","ros2","ufactory"],"primaryLanguage":null,"pullRequestCount":0,"issueCount":0,"starsCount":1,"forksCount":0,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-08-10T10:51:22.577Z"}},{"type":"Public","name":"NYC-Event-VPR","owner":"ai4ce","isFork":false,"description":"","allTopics":["nyc","dataset","vpr","nyc-opendata","event-camera"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":5,"forksCount":0,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-08-01T18:55:13.013Z"}},{"type":"Public","name":"DeepMapping","owner":"ai4ce","isFork":false,"description":"[CVPR2019 Oral] Self-supervised Point Cloud Map Estimation","allTopics":["mapping","registration","unsupervised-learning","deep-learning","point-cloud"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":1,"issueCount":0,"starsCount":193,"forksCount":44,"license":"Other","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-25T10:13:42.059Z"}},{"type":"Public","name":"EgoPAT3Dv2","owner":"ai4ce","isFork":false,"description":"","allTopics":["computer-vision","robotics","dataset","human-robot-interaction","human-robot-collaboration","egocentric-vision"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":2,"forksCount":0,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-24T20:36:55.117Z"}},{"type":"Public","name":"ai4ce_sensor_ROS2_interfaces","owner":"ai4ce","isFork":false,"description":"This repo contains all the ROS2 packages developed at AI4CE lab for interfacing with various specialized sensors","allTopics":["robotics","hand-eye-calibration","sensors","realsense","ros2","tactile-sensor","tactile-sensing","gelsight","gelsight-mini"],"primaryLanguage":null,"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":0,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-23T14:03:33.645Z"}},{"type":"Public","name":"UNav_demo","owner":"ai4ce","isFork":false,"description":"","allTopics":[],"primaryLanguage":{"name":"Jupyter Notebook","color":"#DA5B0B"},"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":0,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-22T03:04:59.074Z"}},{"type":"Public","name":"SPARE3D","owner":"ai4ce","isFork":false,"description":"[CVPR2020] A Dataset for SPAtial REasoning on Three-View Line Drawings","allTopics":["reasoning","spatial-reasoning","line-drawings","line-drawing","pythonocc","deep-learning"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":2,"issueCount":1,"starsCount":52,"forksCount":9,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-18T15:09:14.152Z"}},{"type":"Public","name":"NYC-Indoor-VPR","owner":"ai4ce","isFork":false,"description":"","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":9,"forksCount":0,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-05T04:55:56.041Z"}},{"type":"Public","name":"DeepMapping2","owner":"ai4ce","isFork":false,"description":"[CVPR2023] DeepMapping2: Self-Supervised Large-Scale LiDAR Map Optimization","allTopics":["mapping","lidar","slam","lidar-slam","lidar-mapping","training-as-optimization","deepmapping","large-scale-mapping"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":1,"issueCount":0,"starsCount":156,"forksCount":16,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-26T08:53:44.212Z"}},{"type":"Public","name":"LLM4VPR","owner":"ai4ce","isFork":false,"description":"Can multimodal LLM help visual place recognition?","allTopics":["robotics","visual-place-recognition","vpr","vision-and-language","llm","vision-language-model"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":28,"forksCount":1,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-26T07:37:04.581Z"}},{"type":"Public","name":"LUWA","owner":"ai4ce","isFork":false,"description":"","allTopics":["computer-vision","archeology","anthropology","ai4science","large-vision-language-model"],"primaryLanguage":{"name":"Jupyter Notebook","color":"#DA5B0B"},"pullRequestCount":0,"issueCount":0,"starsCount":1,"forksCount":0,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-26T01:45:18.151Z"}},{"type":"Public","name":"MARS","owner":"ai4ce","isFork":false,"description":"[CVPR2024] Multiagent Multitraversal Multimodal Self-Driving: Open MARS Dataset","allTopics":["dataset","multiagent","nerf","self-driving","multimodal-deep-learning","collaborative-perception","cvpr2024","3dgs","coperception"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":1,"starsCount":36,"forksCount":0,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-25T01:56:30.376Z"}},{"type":"Public","name":"vis_nav_player","owner":"ai4ce","isFork":false,"description":"[ROB-GY 6203] Example Visual Navigation Player Code for Course Project","allTopics":["robotics","visual-navigation"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":5,"forksCount":12,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-03-26T16:28:13.438Z"}},{"type":"Public","name":"DeepExplorer","owner":"ai4ce","isFork":false,"description":"[RSS2023] Metric-Free Exploration for Topological Mapping by Task and Motion Imitation in Feature Space","allTopics":["visual-navigation","embodied-ai","visual-exploration","topological-map","habitat-sim","task-and-motion-planning"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":24,"forksCount":2,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-03-18T15:37:07.907Z"}},{"type":"Public","name":"livox_ros_driver2","owner":"ai4ce","isFork":true,"description":"Livox device driver under Ros(Compatible with ros and ros2), support Lidar HAP and Mid-360.","allTopics":[],"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":201,"license":"Other","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-02-01T17:57:15.356Z"}},{"type":"Public","name":"RealCity3D","owner":"ai4ce","isFork":false,"description":"","allTopics":[],"primaryLanguage":{"name":"Jupyter Notebook","color":"#DA5B0B"},"pullRequestCount":0,"issueCount":0,"starsCount":22,"forksCount":1,"license":"GNU General Public License v3.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-12-23T14:21:53.553Z"}},{"type":"Public","name":"Occ4cast","owner":"ai4ce","isFork":false,"description":"Occ4cast: LiDAR-based 4D Occupancy Completion and Forecasting","allTopics":["machine-learning","computer-vision","point-cloud","artificial-intelligence","dataset","lidar","autonomous-driving","autonomous-vehicles","occupancy-grid-map","occupancy-prediction","3d-perception","spatial-temporal-forecasting","3d-scene-understanding","temporal-perception","3d-to-4d"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":2,"starsCount":112,"forksCount":8,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-11-18T21:11:48.871Z"}},{"type":"Public","name":"TF-VPR","owner":"ai4ce","isFork":false,"description":"Self-supervised place recognition by exploring temporal and feature neighborhoods","allTopics":["place-recognition","vpr","loop-closure-detection","self-supervised-learning"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":15,"forksCount":0,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-10-14T22:00:13.319Z"}},{"type":"Public","name":"EgoPAT3D","owner":"ai4ce","isFork":false,"description":"[CVPR 2022] Egocentric Action Target Prediction in 3D","allTopics":["machine-learning","computer-vision","human-robot-interaction","human-robot-collaboration","target-prediction","multimodal-learning","3d-computer-vision","wearable-robotics","egocentric-vision","deep-learning","dataset"],"primaryLanguage":{"name":"Jupyter Notebook","color":"#DA5B0B"},"pullRequestCount":0,"issueCount":3,"starsCount":29,"forksCount":3,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-09-19T14:44:30.465Z"}},{"type":"Public","name":"DiscoNet","owner":"ai4ce","isFork":false,"description":"[NeurIPS2021] Learning Distilled Collaboration Graph for Multi-Agent Perception","allTopics":["computer-vision","deep-learning","autonomous-driving","knowledge-distillation","communication-networks","v2v","collaborative-learning","multi-agent-learning","multi-agent-system","graph-learning","3d-object-detection","point-cloud-processing","v2x-communication","multi-agent-perception","3d-scene-understanding","graph"],"primaryLanguage":null,"pullRequestCount":0,"issueCount":0,"starsCount":136,"forksCount":17,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-09-15T20:28:54.655Z"}},{"type":"Public","name":"V2X-Sim","owner":"ai4ce","isFork":false,"description":"[RA-L2022] V2X-Sim Dataset and Benchmark","allTopics":["benchmark","machine-learning","computer-vision","simulation","v2x","multi-robot-systems","collaborative-perception","vehicle-to-everything","deep-learning","pytorch","dataset"],"primaryLanguage":null,"pullRequestCount":0,"issueCount":6,"starsCount":111,"forksCount":15,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-09-15T20:27:23.604Z"}},{"type":"Public","name":"SNAC","owner":"ai4ce","isFork":false,"description":"[ICLR2023] Learning Simultaneous Navigation and Construction in Grid Worlds","allTopics":["construction","benchmark","robotics","navigation","reinforcement-learning-environments","mobile-manipulation"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":18,"forksCount":4,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-09-07T01:01:27.817Z"}}],"repositoryCount":43,"userInfo":null,"searchable":true,"definitions":[],"typeFilters":[{"id":"all","text":"All"},{"id":"public","text":"Public"},{"id":"source","text":"Sources"},{"id":"fork","text":"Forks"},{"id":"archived","text":"Archived"},{"id":"template","text":"Templates"}],"compactMode":false},"title":"ai4ce repositories"}