#include <cstdio>
#include <thread>
#include <vector>
#include <cameraserver/CameraServer.h>
#include <frc/TimedRobot.h>
#include <frc/apriltag/AprilTagDetection.h>
#include <frc/apriltag/AprilTagDetector.h>
#include <frc/apriltag/AprilTagPoseEstimator.h>
#include <networktables/IntegerArrayTopic.h>
#include <networktables/NetworkTableInstance.h>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <units/length.h>
/**
* This is a demo program showing the detection of AprilTags.
* The image is acquired from the USB camera, then any detected AprilTags
* are marked up on the image and sent to the dashboard.
*
* Be aware that the performance on this is much worse than a coprocessor solution!
*/
class Robot : public frc::TimedRobot {
 public:
  Robot() {
#if defined(__linux__) || defined(_WIN32)
    // Vision runs on its own detached thread so it can never block the
    // TimedRobot periodic loop; it lives for the life of the program.
    std::thread visionThread(VisionThread);
    visionThread.detach();
#else
    std::fputs("Vision only available on Linux or Windows.\n", stderr);
#endif
  }

#if defined(__linux__) || defined(_WIN32)
 private:
  /**
   * Continuously grabs frames from the USB camera, detects tag36h11
   * AprilTags, draws each detection (outline, center cross, ID label) onto
   * the frame, and publishes the frame plus the tag IDs and estimated poses
   * to NetworkTables under the "apriltags" table.
   *
   * Runs forever; never returns.
   */
  static void VisionThread() {
    frc::AprilTagDetector detector;
    // Look for tag36h11 tags, correcting up to 1 error bit.
    detector.AddFamily("tag36h11", 1);

    // Pose-estimator intrinsics are for a Microsoft Lifecam HD-3000 at
    // 640x480; substitute your own camera's calibration for accurate poses.
    frc::AprilTagPoseEstimator::Config poseEstConfig = {
        .tagSize = 6.5_in,
        .fx = 699.3778103158814,
        .fy = 677.7161226393544,
        .cx = 345.6059345433618,
        .cy = 207.12741326228522};
    frc::AprilTagPoseEstimator estimator(poseEstConfig);

    // Capture from the first USB camera; stream the annotated result back
    // to the dashboard as "Detected".
    cs::UsbCamera camera = frc::CameraServer::StartAutomaticCapture();
    camera.SetResolution(640, 480);
    cs::CvSink cvSink = frc::CameraServer::GetVideo();
    cs::CvSource outputStream =
        frc::CameraServer::PutVideo("Detected", 640, 480);

    // Mats are memory-expensive; allocate once and reuse every frame.
    cv::Mat mat;
    cv::Mat grayMat;

    // Reused scratch list of the tag IDs seen in the current frame.
    std::vector<int64_t> tags;

    // Drawing constants, hoisted out of the frame loop.
    const cv::Scalar outlineColor{0, 255, 0};  // green tag outline (BGR)
    const cv::Scalar crossColor{0, 0, 255};    // red center cross / label
    constexpr int kCrossHalfLen = 10;          // half-length of cross, px

    // Publish the per-frame list of detected IDs to "apriltags/tags".
    auto tagsTable =
        nt::NetworkTableInstance::GetDefault().GetTable("apriltags");
    auto pubTags = tagsTable->GetIntegerArrayTopic("tags").Publish();

    while (true) {
      // GrabFrame returns 0 on timeout/error; report it and try again.
      if (cvSink.GrabFrame(mat) == 0) {
        outputStream.NotifyError(cvSink.GetError());
        continue;
      }

      // The detector consumes a single-channel (grayscale) image.
      cv::cvtColor(mat, grayMat, cv::COLOR_BGR2GRAY);

      cv::Size size = grayMat.size();
      frc::AprilTagDetector::Results detections =
          detector.Detect(size.width, size.height, grayMat.data);

      tags.clear();
      for (const frc::AprilTagDetection* detection : detections) {
        tags.push_back(detection->GetId());

        // Outline the tag by connecting its four corners in order.
        for (int i = 0; i < 4; ++i) {
          const frc::AprilTagDetection::Point a = detection->GetCorner(i);
          const frc::AprilTagDetection::Point b =
              detection->GetCorner((i + 1) % 4);
          cv::line(mat, cv::Point(a.x, a.y), cv::Point(b.x, b.y),
                   outlineColor, 2);
        }

        // Mark the center of the tag with a cross.
        const frc::AprilTagDetection::Point c = detection->GetCenter();
        cv::line(mat, cv::Point(c.x - kCrossHalfLen, c.y),
                 cv::Point(c.x + kCrossHalfLen, c.y), crossColor, 2);
        cv::line(mat, cv::Point(c.x, c.y - kCrossHalfLen),
                 cv::Point(c.x, c.y + kCrossHalfLen), crossColor, 2);

        // Label the tag with its numeric ID.
        cv::putText(mat, std::to_string(detection->GetId()),
                    cv::Point(c.x + kCrossHalfLen, c.y),
                    cv::FONT_HERSHEY_SIMPLEX, 1, crossColor, 3);

        // Estimate the camera-relative pose and publish it as
        // [x, y, z, rotX, rotY, rotZ] under "apriltags/pose_<id>".
        frc::Transform3d pose = estimator.Estimate(*detection);
        frc::Rotation3d rotation = pose.Rotation();
        tagsTable->GetEntry(fmt::format("pose_{}", detection->GetId()))
            .SetDoubleArray({{pose.X().value(), pose.Y().value(),
                              pose.Z().value(), rotation.X().value(),
                              rotation.Y().value(), rotation.Z().value()}});
      }

      pubTags.Set(tags);
      outputStream.PutFrame(mat);
    }
  }
#endif
};
#ifndef RUNNING_FRC_TESTS
// Program entry point: hand control to the WPILib framework, which
// constructs Robot and drives its lifecycle. StartRobot does not return
// during normal robot operation.
int main() {
return frc::StartRobot<Robot>();
}
#endif