Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
18 changes: 18 additions & 0 deletions src/main/java/frc/robot/Constants.java
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,10 @@
import com.pathplanner.lib.config.RobotConfig;

import edu.wpi.first.math.geometry.Rotation2d;
import edu.wpi.first.math.geometry.Transform3d;
import edu.wpi.first.math.geometry.Translation2d;
import edu.wpi.first.math.geometry.Translation3d;
import edu.wpi.first.math.geometry.Rotation3d;
import edu.wpi.first.math.system.plant.DCMotor;
import edu.wpi.first.math.util.Units;
import edu.wpi.first.units.measure.Current;
Expand Down Expand Up @@ -106,6 +109,21 @@ General.ROBOT_MASS, General.ROBOT_MOI, new ModuleConfig(WHEEL_RADIUS, General.MA
);
}

/**
 * Constants for the PhotonVision-based vision subsystem: camera identity,
 * mounting transform, and the standard deviations used when fusing vision
 * pose estimates into the drive's SwerveDrivePoseEstimator.
 *
 * Tuning guidance (lower std dev = trust vision more):
 *   - If the fused pose is jumpy, increase the std-dev values (trust vision less).
 *   - If the pose is slow to correct drift, decrease them (trust vision more).
 * The MULTI_* values apply when more than one AprilTag is visible (multi-tag
 * solves are more reliable), and XY uncertainty grows with average tag
 * distance via VISION_DISTANCE_SCALE.
 */
public static class Vision
{
    // NetworkTables name of the PhotonVision camera. Must match the name
    // configured in the PhotonVision UI.
    public static final String PHOTON_CAMERA_NAME = "photonCam"; // TODO: Name

    // Targets farther than this (meters) are excluded from processing.
    public static final double MAX_DETECTION_RANGE = Units.inchesToMeters(300);

    // Pose of the camera relative to the robot center, WPILib conventions:
    // X forward, Y left, Z up; rotations are roll/pitch/yaw in radians.
    // NOTE(review): placeholder mounting values — measure on the real robot.
    public static final Transform3d ROBOT_TO_CAMERA = new Transform3d(
        new Translation3d(0.5, 0.0, 0.5), // X forward, Y left, Z up from robot center
        new Rotation3d(0, Units.degreesToRadians(-30), 0)
    ); // Camera tilted up 30 degrees

    // Single-tag (base) measurement uncertainty.
    public static final double VISION_STD_DEV_BASE_XY = 0.5; // meters
    public static final double VISION_STD_DEV_BASE_THETA = Units.degreesToRadians(30);

    // Multi-tag measurement uncertainty (tighter: multi-tag PnP is more accurate).
    public static final double VISION_STD_DEV_MULTI_XY = 0.1;
    public static final double VISION_STD_DEV_MULTI_THETA = Units.degreesToRadians(10);

    // Additional XY uncertainty added per meter of average distance to the
    // visible tags (farther tags = noisier solve).
    public static final double VISION_DISTANCE_SCALE = 0.2; // How much to increase uncertainty per meter
}

public static class Elevator
{
public static final double EXTENSION_KP = 0.4;
Expand Down
8 changes: 8 additions & 0 deletions src/main/java/frc/robot/RobotContainer.java
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,9 @@
import frc.robot.subsystems.drive.ModuleIO;
import frc.robot.subsystems.drive.ModuleIOHardware;
import frc.robot.subsystems.drive.ModuleIOSim;
import frc.robot.subsystems.vision.Vision;
import frc.robot.subsystems.vision.VisionIO;
import frc.robot.subsystems.vision.VisionIOPhotonLib;
import frc.robot.subsystems.elevator.Elevator;
import frc.robot.subsystems.elevator.ElevatorIO;
import frc.robot.subsystems.elevator.ElevatorIOHardware;
Expand All @@ -46,6 +49,8 @@ public class RobotContainer
{
// Subsystems
private final Drive _drive;
@SuppressWarnings("unused")
private final Vision _vision;
private final Elevator _elevator;
private final Manipulator _manipulator;
private final Funnel _funnel;
Expand All @@ -67,6 +72,7 @@ public RobotContainer()
case REAL:
// Real robot, instantiate hardware IO implementations
_drive = new Drive(new GyroIONavX(), new ModuleIOHardware(0), new ModuleIOHardware(1), new ModuleIOHardware(2), new ModuleIOHardware(3));
_vision = new Vision(_drive, new VisionIOPhotonLib(_drive));
_elevator = new Elevator(new ElevatorIOHardware());
_manipulator = new Manipulator(new ManipulatorIOHardware());
_funnel = new Funnel(new FunnelIOHardware());
Expand All @@ -76,6 +82,7 @@ public RobotContainer()
case SIM:
// Sim robot, instantiate physics sim IO implementations
_drive = new Drive(new GyroIO() {}, new ModuleIOSim(), new ModuleIOSim(), new ModuleIOSim(), new ModuleIOSim());
_vision = new Vision(_drive, new VisionIOPhotonLib(_drive));
_elevator = new Elevator(new ElevatorIOSim());
_manipulator = new Manipulator(new ManipulatorIOSim(() -> _controller.leftTrigger().getAsBoolean()));
_funnel = new Funnel(new FunnelIOSim());
Expand All @@ -85,6 +92,7 @@ public RobotContainer()
default:
// Replayed robot, disable IO implementations
_drive = new Drive(new GyroIO() {}, new ModuleIO() {}, new ModuleIO() {}, new ModuleIO() {}, new ModuleIO() {});
_vision = new Vision(_drive, new VisionIO() {});
_elevator = new Elevator(new ElevatorIO() {});
_manipulator = new Manipulator(new ManipulatorIO() {});
_funnel = new Funnel(new FunnelIO() {});
Expand Down
8 changes: 7 additions & 1 deletion src/main/java/frc/robot/subsystems/drive/Drive.java
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@
import edu.wpi.first.hal.FRCNetComm.tInstances;
import edu.wpi.first.hal.FRCNetComm.tResourceType;
import edu.wpi.first.math.Matrix;
import edu.wpi.first.math.VecBuilder;
import edu.wpi.first.math.estimator.SwerveDrivePoseEstimator;
import edu.wpi.first.math.geometry.Pose2d;
import edu.wpi.first.math.geometry.Rotation2d;
Expand All @@ -32,6 +33,7 @@
import edu.wpi.first.math.kinematics.SwerveModuleState;
import edu.wpi.first.math.numbers.N1;
import edu.wpi.first.math.numbers.N3;
import edu.wpi.first.math.util.Units;
import edu.wpi.first.wpilibj.Alert;
import edu.wpi.first.wpilibj.DriverStation;
import edu.wpi.first.wpilibj.DriverStation.Alliance;
Expand Down Expand Up @@ -103,7 +105,11 @@ public Drive(GyroIO gyroIO, ModuleIO flModuleIO, ModuleIO frModuleIO, ModuleIO b
Logger.recordOutput("Odometry/TrajectorySetpoint", targetPose);
});

_poseEstimator = new SwerveDrivePoseEstimator(_kinematics, new Rotation2d(), getModulePositions(), new Pose2d());
// Configure state estimation with standard deviations
var stateStdDevs = VecBuilder.fill(0.05, 0.05, Units.degreesToRadians(5)); // x, y, heading
var visionStdDevs = VecBuilder.fill(0.5, 0.5, Units.degreesToRadians(30)); // Higher uncertainty for vision
Comment on lines +109 to +110
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

What do these numbers mean? Where did you find them?

Copy link
Contributor Author

@SAKETH11111 SAKETH11111 Feb 4, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

stateStdDevs is base odometry uncertainty

  • 0.05 meters (5cm) for X and Y position
  • 5 degrees for heading These are relatively small because wheel odometry is fairly precise over short distances

visionStdDevs, default vision uncertainty

  • 0.5 meters (50cm) for X and Y position
  • 30 degrees for heading. These are larger because vision measurements tend to be less precise than odometry if we don't see a lot of Apriltags

These values are used by the SwerveDrivePoseEstimator as default "trust levels," lower numbers mean we trust that measurement more. We trust odometry more than vision by default, but then adjust the vision uncertainty based on:

Number of visible tags (more tags = more trust)
Distance to tags (further = less trust)

From: https://docs.wpilib.org/en/stable/docs/software/advanced-controls/state-space/state-space-pose-estimators.html


_poseEstimator = new SwerveDrivePoseEstimator(_kinematics, new Rotation2d(), getModulePositions(), new Pose2d(), stateStdDevs, visionStdDevs);

// Create a list of bezier points from poses. Each pose represents one waypoint.
// The rotation component of the pose should be the direction of travel. Do not
Expand Down
56 changes: 56 additions & 0 deletions src/main/java/frc/robot/subsystems/vision/Vision.java
Original file line number Diff line number Diff line change
@@ -0,0 +1,56 @@
package frc.robot.subsystems.vision;

import org.littletonrobotics.junction.Logger;

import edu.wpi.first.math.VecBuilder;
import edu.wpi.first.wpilibj2.command.SubsystemBase;
import frc.robot.Constants;
import frc.robot.subsystems.drive.Drive;
import java.util.Arrays;

public class Vision extends SubsystemBase
{
private final VisionIO _io;
private final VisionIOInputsAutoLogged _inputs = new VisionIOInputsAutoLogged();
private final Drive _drive;

public Vision(Drive drive, VisionIO io)
{
_drive = drive;
_io = io;
}

@Override
public void periodic()
{
_io.updateInputs(_inputs);
Logger.processInputs("Vision", _inputs);

if (_inputs.hasPose)
{
double avgDistance = 0;
if (_inputs.targetDistances.length > 0)
{
avgDistance = Arrays.stream(_inputs.targetDistances).average().getAsDouble();
}

double xyStdDev;
double thetaStdDev;

if (_inputs.numTargets > 1)
{
xyStdDev = Constants.Vision.VISION_STD_DEV_MULTI_XY + (avgDistance * Constants.Vision.VISION_DISTANCE_SCALE);
thetaStdDev = Constants.Vision.VISION_STD_DEV_MULTI_THETA;
}
else
{
xyStdDev = Constants.Vision.VISION_STD_DEV_BASE_XY + (avgDistance * Constants.Vision.VISION_DISTANCE_SCALE);
thetaStdDev = Constants.Vision.VISION_STD_DEV_BASE_THETA;
}

var stdDevs = VecBuilder.fill(xyStdDev, xyStdDev, thetaStdDev);
Comment on lines +40 to +51
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I'm assuming PhotonVision has some docs about this standard deviation strategy. Can you provide some links to anything explaining what's going on here?


_drive.addVisionMeasurement(_inputs.pose, _inputs.captureTimestamp, stdDevs);
}
}
}
27 changes: 27 additions & 0 deletions src/main/java/frc/robot/subsystems/vision/VisionIO.java
Original file line number Diff line number Diff line change
@@ -0,0 +1,27 @@
package frc.robot.subsystems.vision;

import org.littletonrobotics.junction.AutoLog;

import edu.wpi.first.math.geometry.Pose2d;

/**
 * Hardware-abstraction layer for the vision subsystem (AdvantageKit IO
 * pattern). Implementations (e.g. VisionIOPhotonLib) fill {@link VisionIOInputs}
 * each loop; a replayed robot uses the no-op default.
 */
public interface VisionIO
{
    /** Inputs logged/replayed by AdvantageKit via the generated AutoLogged class. */
    @AutoLog
    public static class VisionIOInputs
    {
        // Timestamp (seconds) at which the frame producing these inputs was captured.
        public double captureTimestamp = 0.0;
        // Estimated field-relative robot pose; only meaningful when hasPose is true.
        public Pose2d pose = new Pose2d();
        // True when the latest frame contained at least one target.
        public boolean hasTargets = false;
        // True when a pose estimate was successfully computed this frame.
        public boolean hasPose = false;
        // Number of targets in the latest frame.
        public int numTargets = 0;
        // Per-target data; arrays are parallel (index i describes the same tag).
        // Distances are planar (XY) camera-to-tag distances in meters.
        public double[] targetDistances = new double[] {}; // Target = specific april tag
        public int[] targetIds = new int[] {};
        public double[] targetYaws = new double[] {};
        public double[] targetPitches = new double[] {};
        public double[] targetAreas = new double[] {};
    }

    /** Update the set of loggable inputs. Default is a no-op for replay mode. */
    public default void updateInputs(VisionIOInputs inputs)
    {
    }
}
197 changes: 197 additions & 0 deletions src/main/java/frc/robot/subsystems/vision/VisionIOPhotonLib.java
Original file line number Diff line number Diff line change
@@ -0,0 +1,197 @@
package frc.robot.subsystems.vision;

import java.util.ArrayList;
import java.util.EnumSet;
import java.util.List;
import java.util.Optional;

import org.photonvision.EstimatedRobotPose;
import org.photonvision.PhotonCamera;
import org.photonvision.PhotonPoseEstimator;
import org.photonvision.PhotonPoseEstimator.PoseStrategy;
import org.photonvision.targeting.PhotonPipelineResult;
import org.photonvision.targeting.PhotonTrackedTarget;

import edu.wpi.first.apriltag.AprilTagFieldLayout;
import edu.wpi.first.apriltag.AprilTagFields;
import edu.wpi.first.math.geometry.Pose2d;
import edu.wpi.first.math.geometry.Transform3d;
import edu.wpi.first.networktables.NetworkTableEvent;
import edu.wpi.first.networktables.NetworkTableInstance;
import frc.robot.Constants;
import frc.robot.subsystems.drive.Drive;

/**
 * PhotonVision implementation of {@link VisionIO}. Reads pipeline results from
 * a PhotonCamera, estimates the robot pose against the AprilTag field layout,
 * and publishes per-target data through VisionIOInputs.
 *
 * Threading: a NetworkTables listener (registered in the constructor) reacts to
 * new camera latency updates on an NT callback thread and caches results into
 * fields guarded by {@code synchronized (this)}; {@code updateInputs} is a
 * synchronized method called from the main robot loop.
 *
 * NOTE(review): both the NT listener and updateInputs() call
 * _camera.getAllUnreadResults(), which CONSUMES results — the two paths steal
 * frames from each other. Consider a single consumer (e.g. the queued
 * multi-input strategy the drivebase uses for odometry) feeding both.
 */
public class VisionIOPhotonLib implements VisionIO
{
    private final PhotonCamera _camera;
    private final AprilTagFieldLayout _fieldLayout;
    private final PhotonPoseEstimator _poseEstimator;
    // Timestamp of the last processed frame (seconds). Shared between the NT
    // listener thread and updateInputs; guarded by this object's monitor.
    // NOTE(review): field name has a typo ("TimesStamp").
    private double _captureTimesStamp = 0.0;
    // Last estimated pose; also used as the reference pose for the next estimate.
    private Pose2d _pose = new Pose2d();
    // NOTE(review): the following fields are written by the NT listener but
    // never read anywhere — dead state unless something else consumes them.
    private boolean _hasPose = false;
    private double[] _distances = new double[] {};
    private int[] _targetIds = new int[] {};
    private double[] _targetYaws = new double[] {};
    private int _numProcessedTargets;

    /**
     * @param drive used only to supply the current odometry pose as the
     *              reference pose for the NT-listener estimation path
     */
    public VisionIOPhotonLib(Drive drive)
    {
        _camera = new PhotonCamera(Constants.Vision.PHOTON_CAMERA_NAME);

        try
        {
            // Load the default AprilTag field layout bundled with the installed
            // WPILib/PhotonVision season (kDefaultField).
            _fieldLayout = AprilTagFieldLayout.loadFromResource(AprilTagFields.kDefaultField.m_resourceFile);
        }
        catch (Exception e)
        {
            // Vision is unusable without a field layout — fail fast at startup.
            System.err.println("Failed to load AprilTag layout: " + e.getMessage());
            throw new RuntimeException(e);
        }

        // Multi-tag PnP on the coprocessor when >1 tag is visible; fall back to
        // the lowest-ambiguity single tag otherwise.
        _poseEstimator = new PhotonPoseEstimator(_fieldLayout, PoseStrategy.MULTI_TAG_PNP_ON_COPROCESSOR, Constants.Vision.ROBOT_TO_CAMERA);
        _poseEstimator.setMultiTagFallbackStrategy(PoseStrategy.LOWEST_AMBIGUITY);

        // Fire whenever the camera publishes a new latency value (i.e. a new frame).
        NetworkTableInstance.getDefault().addListener(NetworkTableInstance.getDefault().getEntry("/photonvision/" + Constants.Vision.PHOTON_CAMERA_NAME + "/latencyMillis"), EnumSet.of(NetworkTableEvent.Kind.kValueRemote), event ->
        {
            var results = _camera.getAllUnreadResults();
            if (results.isEmpty())
            {
                return;
            }
            // get the latest result (last in the list)
            // NOTE(review): older unread results are discarded here; the queued
            // "multi-input" approach used for drive odometry could feed every
            // sample to the estimator instead.
            var result = results.get(results.size() - 1);

            double timestamp = result.getTimestampSeconds();
            List<PhotonTrackedTarget> targets = result.getTargets();

            List<Double> distances = new ArrayList<>();
            List<Integer> ids = new ArrayList<>();
            List<Double> yaws = new ArrayList<>();
            List<PhotonTrackedTarget> processedTargets = new ArrayList<>();

            // Process all targets
            for (PhotonTrackedTarget target : targets)
            {
                ids.add(target.getFiducialId());
                yaws.add(target.getYaw());

                // Planar (XY) camera-to-tag distance in meters.
                var transform = target.getBestCameraToTarget();
                var distance = Math.hypot(transform.getX(), transform.getY());
                distances.add(distance);

                if (distance <= Constants.Vision.MAX_DETECTION_RANGE)
                {
                    processedTargets.add(target);
                }
            }

            // Get pose estimate from processed targets
            // NOTE(review): despite the comment above, the estimator is given the
            // RAW result — the MAX_DETECTION_RANGE filter (processedTargets) does
            // not affect the pose estimate, only _numProcessedTargets.
            _poseEstimator.setReferencePose(drive.getPose());
            Optional<EstimatedRobotPose> estimatedPose = _poseEstimator.update(result);
            Pose2d pose = new Pose2d();
            boolean hasPose = false;

            if (estimatedPose.isPresent())
            {
                pose = estimatedPose.get().estimatedPose.toPose2d();
                hasPose = true;
            }

            // Publish the cached state atomically with respect to updateInputs.
            synchronized (VisionIOPhotonLib.this)
            {
                _captureTimesStamp = timestamp;
                _distances = distances.stream().mapToDouble(Double::doubleValue).toArray();
                _pose = pose;
                _numProcessedTargets = processedTargets.size();
                _hasPose = hasPose;
                _targetIds = ids.stream().mapToInt(Integer::intValue).toArray();
                _targetYaws = yaws.stream().mapToDouble(Double::doubleValue).toArray();
            }
        });
    }

    /**
     * Runs the PhotonPoseEstimator against {@code result}, seeding it with the
     * given reference pose (used to disambiguate single-tag solutions).
     */
    private Optional<EstimatedRobotPose> getEstimatedGlobalPose(Pose2d prevEstimatedRobotPose, PhotonPipelineResult result)
    {
        _poseEstimator.setReferencePose(prevEstimatedRobotPose);
        return _poseEstimator.update(result);
    }

    /**
     * Main-loop path: drains unread camera results, fills per-target arrays,
     * and computes a pose estimate when this frame hasn't been processed yet.
     * Synchronized against the NT listener, which touches the same fields.
     */
    @Override
    public synchronized void updateInputs(VisionIOInputs inputs)
    {
        // NOTE(review): this consumes the same unread-result queue as the NT
        // listener above — whichever runs first gets the frame (see class doc).
        var results = _camera.getAllUnreadResults();
        if (results.isEmpty())
        {
            // No results available
            inputs.hasTargets = false;
            inputs.numTargets = 0;
            inputs.targetIds = new int[0];
            inputs.targetYaws = new double[0];
            inputs.targetPitches = new double[0];
            inputs.targetAreas = new double[0];
            inputs.targetDistances = new double[0];
            inputs.hasPose = false;
            return;
        }

        // Only the newest unread result is used; older frames are dropped.
        var result = results.get(results.size() - 1);

        inputs.captureTimestamp = result.getTimestampSeconds();
        inputs.hasTargets = result.hasTargets();

        if (inputs.hasTargets)
        {
            List<PhotonTrackedTarget> targets = result.getTargets();
            int numTargets = targets.size();

            inputs.targetIds = new int[numTargets];
            inputs.targetYaws = new double[numTargets];
            inputs.targetPitches = new double[numTargets];
            inputs.targetAreas = new double[numTargets];
            inputs.targetDistances = new double[numTargets];

            for (int i = 0; i < numTargets; i++)
            {
                PhotonTrackedTarget target = targets.get(i);
                inputs.targetIds[i] = target.getFiducialId();
                inputs.targetYaws[i] = target.getYaw();
                inputs.targetPitches[i] = target.getPitch();
                inputs.targetAreas[i] = target.getArea();

                // Planar (XY) camera-to-tag distance in meters.
                Transform3d bestCameraToTarget = target.getBestCameraToTarget();
                inputs.targetDistances[i] = Math.hypot(bestCameraToTarget.getX(), bestCameraToTarget.getY());
            }

            inputs.numTargets = numTargets;

            // Update pose estimate if we have new data
            // (skip if the listener already processed a frame with this timestamp).
            if (_captureTimesStamp != inputs.captureTimestamp)
            {
                // Seed with the previous estimate as the reference pose.
                Optional<EstimatedRobotPose> poseEstimate = getEstimatedGlobalPose(_pose, result);
                if (poseEstimate.isPresent())
                {
                    inputs.pose = poseEstimate.get().estimatedPose.toPose2d();
                    inputs.hasPose = true;
                    _pose = inputs.pose;
                }
                else
                {
                    inputs.hasPose = false;
                }
                _captureTimesStamp = inputs.captureTimestamp;
            }
        }
        else
        {
            // No targets visible
            inputs.numTargets = 0;
            inputs.targetIds = new int[0];
            inputs.targetYaws = new double[0];
            inputs.targetPitches = new double[0];
            inputs.targetAreas = new double[0];
            inputs.targetDistances = new double[0];
            inputs.hasPose = false;
        }
    }
}