#
# Copyright (c) FIRST and other WPILib contributors.
# Open Source Software; you can modify and/or share it under the terms of
# the WPILib BSD license file in the root directory of this project.
#

import math

import ntcore
import robotpy_apriltag
import wpilib
import wpilib.simulation
import wpimath.controller
import wpimath.estimator
import wpimath.geometry
import wpimath.kinematics
import wpimath.system.plant
import wpimath.units


class Drivetrain:
    """Represents a differential drive style drivetrain."""

    kMaxSpeed = 3.0  # meters per second
    kMaxAngularSpeed = math.tau  # one rotation per second

    kTrackwidth = 0.381 * 2  # meters
    kWheelRadius = 0.0508  # meters
    kEncoderResolution = 4096  # encoder counts per wheel revolution

    def __init__(self, cameraToObjectTopic: ntcore.DoubleArrayTopic) -> None:
        self.leftLeader = wpilib.PWMSparkMax(1)
        self.leftFollower = wpilib.PWMSparkMax(2)
        self.rightLeader = wpilib.PWMSparkMax(3)
        self.rightFollower = wpilib.PWMSparkMax(4)

        self.leftEncoder = wpilib.Encoder(0, 1)
        self.rightEncoder = wpilib.Encoder(2, 3)

        self.imu = wpilib.OnboardIMU(wpilib.OnboardIMU.MountOrientation.kFlat)

        self.leftPIDController = wpimath.controller.PIDController(1, 0, 0)
        self.rightPIDController = wpimath.controller.PIDController(1, 0, 0)

        self.kinematics = wpimath.kinematics.DifferentialDriveKinematics(
            self.kTrackwidth
        )

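        # The camera is modeled as mounted 1 m forward, 1 m left, and 1 m up from
        # the robot origin, yawed 90 degrees. These are placeholder values for
        # this example; measure the real mounting transform on your own robot.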
        self.robotToCamera = wpimath.geometry.Transform3d(
            wpimath.geometry.Translation3d(1, 1, 1),
            wpimath.geometry.Rotation3d(0, 0, math.pi / 2),
        )

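        # The vision entry carries seven doubles: the camera-to-object translation
        # (x, y, z) followed by its rotation as a quaternion (w, x, y, z).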
        self.defaultVal = [0.0] * 7
        self.cameraToObjectEntry = cameraToObjectTopic.getEntry(self.defaultVal)

        # Use an AprilTag on the 2024 Crescendo field as the example vision
        # target. Tag IDs on that field run from 1 to 16 (there is no tag 0), so
        # look up tag 1 and fall back to the origin pose if the lookup fails.
        layout = robotpy_apriltag.AprilTagFieldLayout.loadField(
            robotpy_apriltag.AprilTagField.k2024Crescendo
        )
        self.objectInField = layout.getTagPose(1) or wpimath.geometry.Pose3d()

        self.fieldSim = wpilib.Field2d()
        self.fieldApproximation = wpilib.Field2d()

        # Here we use DifferentialDrivePoseEstimator so that we can fuse odometry
        # readings with vision. The standard deviations below are robot specific
        # and should be tuned.
        self.poseEstimator = wpimath.estimator.DifferentialDrivePoseEstimator(
            self.kinematics,
            self.imu.getRotation2d(),
            self.leftEncoder.getDistance(),
            self.rightEncoder.getDistance(),
            wpimath.geometry.Pose2d(),
            (0.05, 0.05, wpimath.units.degreesToRadians(5)),
            (0.5, 0.5, wpimath.units.degreesToRadians(30)),
        )
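        # The first tuple above is the state standard deviations (x, y, heading)
        # for the wheel odometry; the second is the vision measurement standard
        # deviations. Larger values mean that source is trusted less.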

        # Gains are for example purposes only -- they must be determined for your
        # own robot!
        self.feedforward = wpimath.controller.SimpleMotorFeedforwardMeters(1, 3)

        # Simulation classes
        self.leftEncoderSim = wpilib.simulation.EncoderSim(self.leftEncoder)
        self.rightEncoderSim = wpilib.simulation.EncoderSim(self.rightEncoder)
        self.drivetrainSystem = (
            wpimath.system.plant.LinearSystemId.identifyDrivetrainSystem(
                1.98, 0.2, 1.5, 0.3
            )
        )
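        # The four arguments are the kV-linear, kA-linear, kV-angular, and
        # kA-angular characterization gains; like the feedforward gains above,
        # they are example values only.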
        self.drivetrainSimulator = wpilib.simulation.DifferentialDrivetrainSim(
            self.drivetrainSystem,
            self.kTrackwidth,
            wpimath.system.plant.DCMotor.CIM(2),  # two CIM motors per side
            8,  # 8:1 gearing reduction
            self.kWheelRadius,
        )

        self.imu.resetYaw()

        self.leftLeader.addFollower(self.leftFollower)
        self.rightLeader.addFollower(self.rightFollower)

        # We need to invert one side of the drivetrain so that positive voltages
        # result in both sides moving forward. Depending on how your robot's
        # gearbox is constructed, you might have to invert the left side instead.
        self.rightLeader.setInverted(True)

        # Set the distance per pulse for the drive encoders. We can simply use the
        # distance traveled for one rotation of the wheel divided by the encoder
        # resolution.
        self.leftEncoder.setDistancePerPulse(
            math.tau * self.kWheelRadius / self.kEncoderResolution
        )
        self.rightEncoder.setDistancePerPulse(
            math.tau * self.kWheelRadius / self.kEncoderResolution
        )

        self.leftEncoder.reset()
        self.rightEncoder.reset()

        wpilib.SmartDashboard.putData("Field", self.fieldSim)
        wpilib.SmartDashboard.putData("FieldEstimation", self.fieldApproximation)

    def setSpeeds(
        self, speeds: wpimath.kinematics.DifferentialDriveWheelSpeeds
    ) -> None:
        """Sets the desired wheel speeds.

        :param speeds: The desired wheel speeds.
        """
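        # Feedforward computes the voltage needed to hold each wheel's setpoint
        # velocity in steady state; the PID controllers then correct the residual
        # error between the measured encoder rate and that setpoint.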
        leftFeedforward = self.feedforward.calculate(speeds.left)
        rightFeedforward = self.feedforward.calculate(speeds.right)

        leftOutput = self.leftPIDController.calculate(
            self.leftEncoder.getRate(), speeds.left
        )
        rightOutput = self.rightPIDController.calculate(
            self.rightEncoder.getRate(), speeds.right
        )
        self.leftLeader.setVoltage(leftOutput + leftFeedforward)
        self.rightLeader.setVoltage(rightOutput + rightFeedforward)

    def drive(self, xSpeed: float, rot: float) -> None:
        """Drives the robot with the given linear and angular velocities.

        :param xSpeed: Linear velocity in m/s.
        :param rot: Angular velocity in rad/s.
        """
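        # Inverse kinematics converts the desired chassis motion (forward speed
        # plus rotation rate) into individual left and right wheel speeds.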
        wheelSpeeds = self.kinematics.toWheelSpeeds(
            wpimath.kinematics.ChassisSpeeds(xSpeed, 0.0, rot)
        )
        self.setSpeeds(wheelSpeeds)

    def publishCameraToObject(
        self,
        objectInField: wpimath.geometry.Pose3d,
        robotToCamera: wpimath.geometry.Transform3d,
        cameraToObjectEntry: ntcore.DoubleArrayEntry,
        drivetrainSimulator: wpilib.simulation.DifferentialDrivetrainSim,
    ) -> None:
        """Computes and publishes to a NetworkTables topic the transformation
        from the camera's pose to the object's pose. This function exists solely
        for simulation purposes; on a real robot this would be handled by the
        computer vision solution.

        The object could be a target or a fiducial marker.

        :param objectInField: The object's field-relative pose.
        :param robotToCamera: The transformation from the robot's pose to the
            camera's pose.
        :param cameraToObjectEntry: The NetworkTables entry for publishing and
            querying example computer vision measurements.
        :param drivetrainSimulator: The drivetrain simulator providing the
            ground-truth robot pose.
        """
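        # In simulation the drivetrain simulator's pose is ground truth, so the
        # exact transform a perfect camera would report can be computed directly.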
        robotInField = wpimath.geometry.Pose3d(drivetrainSimulator.getPose())
        cameraInField = robotInField.transformBy(robotToCamera)
        cameraToObject = wpimath.geometry.Transform3d(cameraInField, objectInField)

        # Publish a double array holding the translation elements {x, y, z} and
        # the rotation as a quaternion {w, x, y, z}, which together describe the
        # cameraToObject transformation.
        quaternion = cameraToObject.rotation().getQuaternion()
        val = [
            cameraToObject.x,
            cameraToObject.y,
            cameraToObject.z,
            quaternion.W(),
            quaternion.X(),
            quaternion.Y(),
            quaternion.Z(),
        ]
        cameraToObjectEntry.set(val)

    def objectToRobotPose(
        self,
        objectInField: wpimath.geometry.Pose3d,
        robotToCamera: wpimath.geometry.Transform3d,
        cameraToObjectEntry: ntcore.DoubleArrayEntry,
    ) -> wpimath.geometry.Pose3d:
        """Queries the camera-to-object transformation from NetworkTables to
        compute the robot's field-relative pose from vision measurements.

        The object could be a target or a fiducial marker.

        :param objectInField: The object's field-relative pose.
        :param robotToCamera: The transformation from the robot's pose to the
            camera's pose.
        :param cameraToObjectEntry: The NetworkTables entry for publishing and
            querying example computer vision measurements.
        :return: The robot's field-relative pose.
        """
        val = cameraToObjectEntry.get()

        # Reconstruct the cameraToObject Transform3d from the NetworkTables data.
        translation = wpimath.geometry.Translation3d(val[0], val[1], val[2])
        rotation = wpimath.geometry.Rotation3d(
            wpimath.geometry.Quaternion(val[3], val[4], val[5], val[6])
        )
        cameraToObject = wpimath.geometry.Transform3d(translation, rotation)

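        # Work backwards along the transform chain: applying the inverse of
        # cameraToObject to the object's field pose gives the camera's field pose,
        # and applying the inverse of robotToCamera then gives the robot's.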
        cameraInField = objectInField.transformBy(cameraToObject.inverse())
        robotInField = cameraInField.transformBy(robotToCamera.inverse())
        return robotInField

    def updateOdometry(self) -> None:
        """Updates the robot's field-relative pose estimate."""
        self.poseEstimator.update(
            self.imu.getRotation2d(),
            self.leftEncoder.getDistance(),
            self.rightEncoder.getDistance(),
        )

        # Publish the cameraToObject transformation to NetworkTables -- this
        # would normally be handled by the computer vision solution.
        self.publishCameraToObject(
            self.objectInField,
            self.robotToCamera,
            self.cameraToObjectEntry,
            self.drivetrainSimulator,
        )

        # Compute the robot's field-relative pose exclusively from vision
        # measurements.
        visionMeasurement3d = self.objectToRobotPose(
            self.objectInField, self.robotToCamera, self.cameraToObjectEntry
        )

        # Convert the robot pose from Pose3d to the Pose2d needed to apply vision
        # measurements.
        visionMeasurement2d = visionMeasurement3d.toPose2d()

        # Apply vision measurements. For simulation purposes only, we don't input
        # a latency delay -- on a real robot, this must be calculated based either
        # on known latency or timestamps.
        self.poseEstimator.addVisionMeasurement(
            visionMeasurement2d,
            wpilib.Timer.getTimestamp(),
        )

    def simulationPeriodic(self) -> None:
        """This function is called periodically during simulation."""
        # To update the simulation, we set the motor voltage inputs, step the
        # simulation forward, and write the simulated positions and velocities
        # to our simulated encoders.
        self.drivetrainSimulator.setInputs(
            self.leftLeader.get() * wpilib.RobotController.getInputVoltage(),
            self.rightLeader.get() * wpilib.RobotController.getInputVoltage(),
        )
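        # get() returns the commanded duty cycle in [-1, 1]; multiplying by the
        # simulated battery voltage converts it to an applied motor voltage.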
        self.drivetrainSimulator.update(0.02)  # step by the 20 ms loop period

        self.leftEncoderSim.setDistance(self.drivetrainSimulator.getLeftPosition())
        self.leftEncoderSim.setRate(self.drivetrainSimulator.getLeftVelocity())
        self.rightEncoderSim.setDistance(self.drivetrainSimulator.getRightPosition())
        self.rightEncoderSim.setRate(self.drivetrainSimulator.getRightVelocity())
        # NOTE: no simulation class is used for the IMU heading here; a gyro
        # simulation hook would go in this spot, e.g.:
        # self.gyroSim.setAngle(-self.drivetrainSimulator.getHeading().degrees())

    def periodic(self) -> None:
        """This function is called periodically, no matter the mode."""
        self.updateOdometry()
        # Plot the simulator's ground-truth pose and the fused estimate on
        # separate Field2d widgets so they can be compared on the dashboard.
        self.fieldSim.setRobotPose(self.drivetrainSimulator.getPose())
        self.fieldApproximation.setRobotPose(self.poseEstimator.getEstimatedPosition())
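

# A minimal usage sketch (assumptions: a TimedRobot host class and the
# illustrative topic name "CameraToObject"; neither appears in this file):
#
#   class MyRobot(wpilib.TimedRobot):
#       def robotInit(self) -> None:
#           topic = ntcore.NetworkTableInstance.getDefault().getDoubleArrayTopic(
#               "CameraToObject"
#           )
#           self.drivetrain = Drivetrain(topic)
#
#       def robotPeriodic(self) -> None:
#           self.drivetrain.periodic()
#
#       def teleopPeriodic(self) -> None:
#           self.drivetrain.drive(1.0, 0.0)  # drive straight at 1 m/s
#
#       def _simulationPeriodic(self) -> None:
#           self.drivetrain.simulationPeriodic()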