-
Notifications
You must be signed in to change notification settings - Fork 0
Vision updated #22
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: main
Are you sure you want to change the base?
Vision updated #22
Changes from all commits
3e61df9
6fb1112
5fb3326
54e6cdd
cc19856
b1cd6bd
b2076f7
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -22,6 +22,7 @@ | |
| import edu.wpi.first.hal.FRCNetComm.tInstances; | ||
| import edu.wpi.first.hal.FRCNetComm.tResourceType; | ||
| import edu.wpi.first.math.Matrix; | ||
| import edu.wpi.first.math.VecBuilder; | ||
| import edu.wpi.first.math.estimator.SwerveDrivePoseEstimator; | ||
| import edu.wpi.first.math.geometry.Pose2d; | ||
| import edu.wpi.first.math.geometry.Rotation2d; | ||
|
|
@@ -32,6 +33,7 @@ | |
| import edu.wpi.first.math.kinematics.SwerveModuleState; | ||
| import edu.wpi.first.math.numbers.N1; | ||
| import edu.wpi.first.math.numbers.N3; | ||
| import edu.wpi.first.math.util.Units; | ||
| import edu.wpi.first.wpilibj.Alert; | ||
| import edu.wpi.first.wpilibj.DriverStation; | ||
| import edu.wpi.first.wpilibj.DriverStation.Alliance; | ||
|
|
@@ -103,7 +105,11 @@ public Drive(GyroIO gyroIO, ModuleIO flModuleIO, ModuleIO frModuleIO, ModuleIO b | |
| Logger.recordOutput("Odometry/TrajectorySetpoint", targetPose); | ||
| }); | ||
|
|
||
| _poseEstimator = new SwerveDrivePoseEstimator(_kinematics, new Rotation2d(), getModulePositions(), new Pose2d()); | ||
| // Configure state estimation with standard deviations | ||
| var stateStdDevs = VecBuilder.fill(0.05, 0.05, Units.degreesToRadians(5)); // x, y, heading | ||
| var visionStdDevs = VecBuilder.fill(0.5, 0.5, Units.degreesToRadians(30)); // Higher uncertainty for vision | ||
|
Comment on lines
+109
to
+110
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. What do these numbers mean? Where did you find them?
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. stateStdDevs is base odometry uncertainty
visionStdDevs, default vision uncertainty
These values are used by the SwerveDrivePoseEstimator as default "trust levels": lower numbers mean we trust that measurement more. We trust odometry more than vision by default, but then adjust the vision uncertainty based on the number of visible tags (more tags = more trust). |
||
|
|
||
| _poseEstimator = new SwerveDrivePoseEstimator(_kinematics, new Rotation2d(), getModulePositions(), new Pose2d(), stateStdDevs, visionStdDevs); | ||
|
|
||
| // Create a list of bezier points from poses. Each pose represents one waypoint. | ||
| // The rotation component of the pose should be the direction of travel. Do not | ||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,56 @@ | ||
| package frc.robot.subsystems.vision; | ||
|
|
||
| import org.littletonrobotics.junction.Logger; | ||
|
|
||
| import edu.wpi.first.math.VecBuilder; | ||
| import edu.wpi.first.wpilibj2.command.SubsystemBase; | ||
| import frc.robot.Constants; | ||
| import frc.robot.subsystems.drive.Drive; | ||
| import java.util.Arrays; | ||
|
|
||
| public class Vision extends SubsystemBase | ||
| { | ||
| private final VisionIO _io; | ||
| private final VisionIOInputsAutoLogged _inputs = new VisionIOInputsAutoLogged(); | ||
| private final Drive _drive; | ||
|
|
||
| public Vision(Drive drive, VisionIO io) | ||
| { | ||
| _drive = drive; | ||
| _io = io; | ||
| } | ||
|
|
||
| @Override | ||
| public void periodic() | ||
| { | ||
| _io.updateInputs(_inputs); | ||
| Logger.processInputs("Vision", _inputs); | ||
|
|
||
| if (_inputs.hasPose) | ||
| { | ||
| double avgDistance = 0; | ||
| if (_inputs.targetDistances.length > 0) | ||
| { | ||
| avgDistance = Arrays.stream(_inputs.targetDistances).average().getAsDouble(); | ||
| } | ||
|
|
||
| double xyStdDev; | ||
| double thetaStdDev; | ||
|
|
||
| if (_inputs.numTargets > 1) | ||
| { | ||
| xyStdDev = Constants.Vision.VISION_STD_DEV_MULTI_XY + (avgDistance * Constants.Vision.VISION_DISTANCE_SCALE); | ||
| thetaStdDev = Constants.Vision.VISION_STD_DEV_MULTI_THETA; | ||
| } | ||
| else | ||
| { | ||
| xyStdDev = Constants.Vision.VISION_STD_DEV_BASE_XY + (avgDistance * Constants.Vision.VISION_DISTANCE_SCALE); | ||
| thetaStdDev = Constants.Vision.VISION_STD_DEV_BASE_THETA; | ||
| } | ||
|
|
||
| var stdDevs = VecBuilder.fill(xyStdDev, xyStdDev, thetaStdDev); | ||
|
Comment on lines
+40
to
+51
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. I'm assuming PhotonVision has some docs about this standard deviation strategy. Can you provide some links to anything explaining what's going on here? |
||
|
|
||
| _drive.addVisionMeasurement(_inputs.pose, _inputs.captureTimestamp, stdDevs); | ||
| } | ||
| } | ||
| } | ||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,27 @@ | ||
| package frc.robot.subsystems.vision; | ||
|
|
||
| import org.littletonrobotics.junction.AutoLog; | ||
|
|
||
| import edu.wpi.first.math.geometry.Pose2d; | ||
|
|
||
/**
 * Hardware-abstraction layer for the vision subsystem (AdvantageKit IO
 * pattern). Implementations fill a {@link VisionIOInputs} each loop; the
 * default no-op {@code updateInputs} serves as the "no camera" / replay
 * implementation.
 */
public interface VisionIO
{
    /** Loggable snapshot of one vision update. A "target" is one AprilTag. */
    @AutoLog
    public static class VisionIOInputs
    {
        // Capture time of the frame this data came from, in seconds.
        // NOTE(review): time base is whatever the IO implementation reports —
        // confirm it matches the pose estimator's clock.
        public double captureTimestamp = 0.0;
        // Estimated robot pose derived from the visible tags; only meaningful
        // when hasPose is true.
        public Pose2d pose = new Pose2d();
        // True when at least one target was seen in the latest frame.
        public boolean hasTargets = false;
        // True when a pose estimate was produced for the latest frame.
        public boolean hasPose = false;
        // Number of targets in the parallel arrays below.
        public int numTargets = 0;
        public double[] targetDistances = new double[] {}; // Target = specific april tag
        public int[] targetIds = new int[] {};
        public double[] targetYaws = new double[] {};
        public double[] targetPitches = new double[] {};
        public double[] targetAreas = new double[] {};
    }

    /**
     * Populates {@code inputs} with the latest camera data. The default
     * implementation leaves all fields untouched (no camera present).
     */
    public default void updateInputs(VisionIOInputs inputs)
    {
    }
}
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,197 @@ | ||
| package frc.robot.subsystems.vision; | ||
|
|
||
| import java.util.ArrayList; | ||
| import java.util.EnumSet; | ||
| import java.util.List; | ||
| import java.util.Optional; | ||
|
|
||
| import org.photonvision.EstimatedRobotPose; | ||
| import org.photonvision.PhotonCamera; | ||
| import org.photonvision.PhotonPoseEstimator; | ||
| import org.photonvision.PhotonPoseEstimator.PoseStrategy; | ||
| import org.photonvision.targeting.PhotonPipelineResult; | ||
| import org.photonvision.targeting.PhotonTrackedTarget; | ||
|
|
||
| import edu.wpi.first.apriltag.AprilTagFieldLayout; | ||
| import edu.wpi.first.apriltag.AprilTagFields; | ||
| import edu.wpi.first.math.geometry.Pose2d; | ||
| import edu.wpi.first.math.geometry.Transform3d; | ||
| import edu.wpi.first.networktables.NetworkTableEvent; | ||
| import edu.wpi.first.networktables.NetworkTableInstance; | ||
| import frc.robot.Constants; | ||
| import frc.robot.subsystems.drive.Drive; | ||
|
|
||
/**
 * PhotonVision-backed implementation of {@link VisionIO}. Reads pipeline
 * results from a single {@link PhotonCamera}, estimates robot pose with a
 * {@link PhotonPoseEstimator} (multi-tag PnP on the coprocessor, falling back
 * to lowest-ambiguity), and publishes per-target telemetry.
 *
 * NOTE(review): both the NetworkTables listener registered in the constructor
 * and {@link #updateInputs} call {@code _camera.getAllUnreadResults()}, which
 * drains the camera's result queue — whichever runs first starves the other.
 * Confirm this dual consumption is intended.
 */
public class VisionIOPhotonLib implements VisionIO
{
    private final PhotonCamera _camera;
    private final AprilTagFieldLayout _fieldLayout;
    private final PhotonPoseEstimator _poseEstimator;
    // Timestamp of the last frame whose pose was folded into `_pose`.
    // NOTE(review): field name contains a typo ("TimesStamp") — rename when safe.
    private double _captureTimesStamp = 0.0;
    // Last vision-estimated robot pose; used as the reference pose for the
    // next estimator update in updateInputs().
    private Pose2d _pose = new Pose2d();
    // NOTE(review): _hasPose, _distances, _targetIds, _targetYaws and
    // _numProcessedTargets are written by the NT listener below but never read
    // anywhere in this class — dead state or missing getters? Confirm.
    private boolean _hasPose = false;
    private double[] _distances = new double[] {};
    private int[] _targetIds = new int[] {};
    private double[] _targetYaws = new double[] {};
    private int _numProcessedTargets;

    /**
     * @param drive drive subsystem whose current pose seeds the estimator's
     *              reference pose inside the NT listener.
     *              NOTE(review): VisionIO implementations depending on Drive
     *              inverts the usual Vision -> Drive data flow; confirm.
     */
    public VisionIOPhotonLib(Drive drive)
    {
        _camera = new PhotonCamera(Constants.Vision.PHOTON_CAMERA_NAME);

        try
        {
            // Load the 2024 field layout
            _fieldLayout = AprilTagFieldLayout.loadFromResource(AprilTagFields.kDefaultField.m_resourceFile);
        }
        catch (Exception e)
        {
            // Without a field layout the pose estimator is useless, so fail
            // fast rather than limp along.
            System.err.println("Failed to load AprilTag layout: " + e.getMessage());
            throw new RuntimeException(e);
        }

        _poseEstimator = new PhotonPoseEstimator(_fieldLayout, PoseStrategy.MULTI_TAG_PNP_ON_COPROCESSOR, Constants.Vision.ROBOT_TO_CAMERA);
        _poseEstimator.setMultiTagFallbackStrategy(PoseStrategy.LOWEST_AMBIGUITY);

        // Re-run pose estimation whenever the camera publishes new latency
        // data (a proxy for "a new frame arrived"). Runs on an NT callback
        // thread, hence the synchronized block when publishing shared state.
        NetworkTableInstance.getDefault().addListener(NetworkTableInstance.getDefault().getEntry("/photonvision/" + Constants.Vision.PHOTON_CAMERA_NAME + "/latencyMillis"), EnumSet.of(NetworkTableEvent.Kind.kValueRemote), event ->
        {
            var results = _camera.getAllUnreadResults();
            if (results.isEmpty())
            {
                return;
            }
            // get the latest result (last in the list)
            // NOTE(review): older unread results are discarded here; a queued
            // "multi-input" approach (as the drivebase odometry uses) could
            // feed every sample to the estimator instead.
            var result = results.get(results.size() - 1);

            double timestamp = result.getTimestampSeconds();
            List<PhotonTrackedTarget> targets = result.getTargets();

            List<Double> distances = new ArrayList<>();
            List<Integer> ids = new ArrayList<>();
            List<Double> yaws = new ArrayList<>();
            List<PhotonTrackedTarget> processedTargets = new ArrayList<>();

            // Process all targets
            for (PhotonTrackedTarget target : targets)
            {
                ids.add(target.getFiducialId());
                yaws.add(target.getYaw());

                // 2D (floor-plane) distance from camera to tag.
                var transform = target.getBestCameraToTarget();
                var distance = Math.hypot(transform.getX(), transform.getY());
                distances.add(distance);

                // Keep only targets within the trusted detection range.
                // NOTE(review): processedTargets is counted but the estimator
                // below still consumes the unfiltered `result` — confirm.
                if (distance <= Constants.Vision.MAX_DETECTION_RANGE)
                {
                    processedTargets.add(target);
                }
            }

            // Get pose estimate from processed targets
            _poseEstimator.setReferencePose(drive.getPose());
            Optional<EstimatedRobotPose> estimatedPose = _poseEstimator.update(result);
            Pose2d pose = new Pose2d();
            boolean hasPose = false;

            if (estimatedPose.isPresent())
            {
                pose = estimatedPose.get().estimatedPose.toPose2d();
                hasPose = true;
            }

            // Publish results atomically for readers on the main robot thread.
            synchronized (VisionIOPhotonLib.this)
            {
                _captureTimesStamp = timestamp;
                _distances = distances.stream().mapToDouble(Double::doubleValue).toArray();
                _pose = pose;
                _numProcessedTargets = processedTargets.size();
                _hasPose = hasPose;
                _targetIds = ids.stream().mapToInt(Integer::intValue).toArray();
                _targetYaws = yaws.stream().mapToDouble(Double::doubleValue).toArray();
            }
        });
    }

    /**
     * Runs the pose estimator on {@code result} using
     * {@code prevEstimatedRobotPose} as the reference pose for the
     * lowest-ambiguity fallback strategy.
     */
    private Optional<EstimatedRobotPose> getEstimatedGlobalPose(Pose2d prevEstimatedRobotPose, PhotonPipelineResult result)
    {
        _poseEstimator.setReferencePose(prevEstimatedRobotPose);
        return _poseEstimator.update(result);
    }

    /**
     * Fills {@code inputs} from the newest unread camera result: per-target
     * telemetry always, plus a pose estimate when the frame is newer than the
     * last one processed. Synchronized against the NT listener above.
     */
    @Override
    public synchronized void updateInputs(VisionIOInputs inputs)
    {
        var results = _camera.getAllUnreadResults();
        if (results.isEmpty())
        {
            // No results available
            inputs.hasTargets = false;
            inputs.numTargets = 0;
            inputs.targetIds = new int[0];
            inputs.targetYaws = new double[0];
            inputs.targetPitches = new double[0];
            inputs.targetAreas = new double[0];
            inputs.targetDistances = new double[0];
            inputs.hasPose = false;
            return;
        }

        // Newest result only; older unread frames are dropped (see the same
        // note in the constructor's listener).
        var result = results.get(results.size() - 1);

        inputs.captureTimestamp = result.getTimestampSeconds();
        inputs.hasTargets = result.hasTargets();

        if (inputs.hasTargets)
        {
            List<PhotonTrackedTarget> targets = result.getTargets();
            int numTargets = targets.size();

            inputs.targetIds = new int[numTargets];
            inputs.targetYaws = new double[numTargets];
            inputs.targetPitches = new double[numTargets];
            inputs.targetAreas = new double[numTargets];
            inputs.targetDistances = new double[numTargets];

            // Copy per-target telemetry into the parallel input arrays.
            for (int i = 0; i < numTargets; i++)
            {
                PhotonTrackedTarget target = targets.get(i);
                inputs.targetIds[i] = target.getFiducialId();
                inputs.targetYaws[i] = target.getYaw();
                inputs.targetPitches[i] = target.getPitch();
                inputs.targetAreas[i] = target.getArea();

                // 2D (floor-plane) distance, consistent with the listener above.
                Transform3d bestCameraToTarget = target.getBestCameraToTarget();
                inputs.targetDistances[i] = Math.hypot(bestCameraToTarget.getX(), bestCameraToTarget.getY());
            }

            inputs.numTargets = numTargets;

            // Update pose estimate if we have new data
            if (_captureTimesStamp != inputs.captureTimestamp)
            {
                // Seed the estimator with the last vision pose (not odometry).
                Optional<EstimatedRobotPose> poseEstimate = getEstimatedGlobalPose(_pose, result);
                if (poseEstimate.isPresent())
                {
                    inputs.pose = poseEstimate.get().estimatedPose.toPose2d();
                    inputs.hasPose = true;
                    _pose = inputs.pose;
                }
                else
                {
                    inputs.hasPose = false;
                }
                _captureTimesStamp = inputs.captureTimestamp;
            }
        }
        else
        {
            // No targets visible
            inputs.numTargets = 0;
            inputs.targetIds = new int[0];
            inputs.targetYaws = new double[0];
            inputs.targetPitches = new double[0];
            inputs.targetAreas = new double[0];
            inputs.targetDistances = new double[0];
            inputs.hasPose = false;
        }
    }
}
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
What do these numbers mean? I'm assuming they'll need to be updated for our robot? How do we find/calculate them?
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
From what I know, to tune these, start with these default values and then:
If the robot seems too jumpy: increase the numbers (trust vision less)
If the robot seems slow to correct: decrease the numbers (trust vision more)