VingVing Robot Documentation v0.0.3

AI-Powered Navigation

Combining VingVing Robot with TensorFlow and machine learning for intelligent autonomous behavior

AI + Path Following

Use machine learning to detect game elements, identify targets, and make real-time decisions, then leverage VingVing Robot's path following to execute the planned movements with precision.
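
At its core this is a three-step loop, sketched below. decideFrom() and pathFor() are hypothetical helpers standing in for your own logic; the full examples on this page flesh out each step.

// Sketch: the detect -> decide -> execute pattern used throughout this page.
// decideFrom() and pathFor() are placeholders for your own code.
List<Recognition> detections = tfod.getRecognitions(); // 1. AI detects
String decision = decideFrom(detections);              // 2. Your logic decides
follower.followPath(pathFor(decision));                // 3. VingVing Robot executes

while (!follower.isPathComplete() && opModeIsActive()) {
    robot.update();
    follower.update();
    robot.setDrivePowers(follower.getMotorPowers());
}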

Example 1: TensorFlow Object Detection

ObjectDetectionAuto.java
package org.firstinspires.ftc.teamcode;

import com.qualcomm.robotcore.eventloop.opmode.Autonomous;
import com.qualcomm.robotcore.eventloop.opmode.LinearOpMode;
import org.firstinspires.ftc.vision.tfod.TfodProcessor;
import org.firstinspires.ftc.robotcore.external.tfod.Recognition;
import org.firstinspires.ftc.robotcore.external.hardware.camera.WebcamName;
import org.firstinspires.ftc.vision.VisionPortal;
import com.vingvingrobot.Robot;
import com.vingvingrobot.follower.Follower;
import java.util.List; // used for List<Recognition> in the init loop
// Point and BezierLine are also used below; import them from your VingVing Robot install.

@Autonomous(name = "AI Object Detection")
public class ObjectDetectionAuto extends LinearOpMode {
    Robot robot;
    Follower follower;

    TfodProcessor tfod;
    VisionPortal visionPortal;

    private static final String[] LABELS = {
        "Pixel",
        "TeamProp"
    };

    @Override
    public void runOpMode() {
        // Initialize VingVing Robot
        robot = new Robot(hardwareMap, RobotConfig.class);
        robot.initializeLocalizer(hardwareMap);
        follower = new Follower(hardwareMap);

        // Initialize TensorFlow
        tfod = new TfodProcessor.Builder()
            .setModelAssetName("CustomModel.tflite")
            .setModelLabels(LABELS)
            .build();

        visionPortal = new VisionPortal.Builder()
            .setCamera(hardwareMap.get(WebcamName.class, "Webcam 1"))
            .addProcessor(tfod)
            .build();

        telemetry.addLine("Scanning for objects...");
        telemetry.update();

        // Detection during init
        String detectedPosition = "CENTER"; // Default

        while (!isStarted() && !isStopRequested()) {
            List<Recognition> recognitions = tfod.getRecognitions();

            if (recognitions.size() > 0) {
                Recognition object = recognitions.get(0);

                // Determine position based on X coordinate
                double objectX = (object.getLeft() + object.getRight()) / 2;
                double imageWidth = 640; // Camera resolution

                if (objectX < imageWidth / 3) {
                    detectedPosition = "LEFT";
                } else if (objectX > imageWidth * 2 / 3) {
                    detectedPosition = "RIGHT";
                } else {
                    detectedPosition = "CENTER";
                }

                telemetry.addData("Object Detected", object.getLabel());
                telemetry.addData("Confidence", "%.0f%%", object.getConfidence() * 100);
                telemetry.addData("Position", detectedPosition);
            } else {
                telemetry.addLine("No objects detected");
            }
            telemetry.update();
        }

        waitForStart();

        // Navigate based on detected position
        navigateToPosition(detectedPosition);

        telemetry.addLine("✓ Autonomous complete!");
        telemetry.update();
    }

    private void navigateToPosition(String position) {
        Point targetPoint;

        switch (position) {
            case "LEFT":
                targetPoint = new Point(24, 8, Point.CARTESIAN);
                break;
            case "RIGHT":
                targetPoint = new Point(24, -8, Point.CARTESIAN);
                break;
            default: // CENTER
                targetPoint = new Point(24, 0, Point.CARTESIAN);
                break;
        }

        telemetry.addData("Navigating to", position);
        telemetry.update();

        follower.followPath(
            new BezierLine(targetPoint)
                .setConstantHeadingInterpolation(0)
                .setPathEndCallback(() -> {
                    telemetry.addLine("Arrived at " + position + " spike mark");
                })
        );

        while (!follower.isPathComplete() && opModeIsActive()) {
            robot.update();
            follower.update();
            robot.setDrivePowers(follower.getMotorPowers());

            telemetry.addData("Progress", "%.0f%%", follower.getPathProgress() * 100);
            telemetry.update();
        }
    }
}
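
The position logic in Example 1 assumes a 640 px-wide camera image. Rather than hard-coding that assumption, you can pin the stream size when building the portal. setCameraResolution() is a standard VisionPortal.Builder call and android.util.Size is the stock Android type, so the only assumption here is the chosen resolution:

import android.util.Size;

visionPortal = new VisionPortal.Builder()
    .setCamera(hardwareMap.get(WebcamName.class, "Webcam 1"))
    .setCameraResolution(new Size(640, 480)) // matches imageWidth = 640 above
    .addProcessor(tfod)
    .build();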

Example 2: Dynamic Path Selection with AI

DynamicAIAuto.java
@Autonomous(name = "Dynamic AI Auto")
public class DynamicAIAuto extends LinearOpMode {
    Robot robot;
    Follower follower;
    TfodProcessor tfod;
    VisionPortal visionPortal;

    @Override
    public void runOpMode() {
        robot = new Robot(hardwareMap, RobotConfig.class);
        robot.initializeLocalizer(hardwareMap);
        follower = new Follower(hardwareMap);

        initializeVision();
        waitForStart();

        // Phase 1: Drive forward while scanning
        follower.followPath(
            new BezierLine(new Point(20, 0, Point.CARTESIAN))
                .setConstantHeadingInterpolation(0)
                .addCallback(0.5, () -> {
                    // Scan for objects midway
                    analyzeField();
                })
        );

        executePath();

        // Phase 2: Make decision based on AI detection
        String decision = makeIntelligentDecision();

        telemetry.addData("AI Decision", decision);
        telemetry.update();

        // Phase 3: Execute chosen path
        switch (decision) {
            case "SCORE_IMMEDIATELY":
                scoreImmediately();
                break;
            case "COLLECT_MORE":
                collectAdditionalElements();
                break;
            case "PLAY_DEFENSE":
                defensivePosition();
                break;
        }
    }

    private void analyzeField() {
        List<Recognition> detections = tfod.getRecognitions();

        for (Recognition obj : detections) {
            telemetry.addData("Detected", "%s (%.0f%%)",
                obj.getLabel(), obj.getConfidence() * 100);
        }

        telemetry.update();
    }

    private String makeIntelligentDecision() {
        // AI-based decision logic
        List<Recognition> objects = tfod.getRecognitions();

        int pixelCount = 0;
        boolean clearPath = true;

        for (Recognition obj : objects) {
            if (obj.getLabel().equals("Pixel") && obj.getConfidence() > 0.7) {
                pixelCount++;
            }
            if (obj.getLabel().equals("Obstacle")) {
                clearPath = false;
            }
        }

        // Decision tree
        if (pixelCount >= 2 && clearPath) {
            return "SCORE_IMMEDIATELY";
        } else if (pixelCount == 1) {
            return "COLLECT_MORE";
        } else {
            return "PLAY_DEFENSE";
        }
    }

    private void scoreImmediately() {
        follower.followPath(
            new BezierCurve(
                new Point(20, 0, Point.CARTESIAN),
                new Point(30, 20, Point.CARTESIAN),
                new Point(20, 40, Point.CARTESIAN),
                new Point(8, 52, Point.CARTESIAN)
            ).setLinearHeadingInterpolation(0, Math.toRadians(90))
        );
        executePath();
    }

    private void collectAdditionalElements() {
        follower.followPath(
            new BezierLine(new Point(36, -12, Point.CARTESIAN))
                .setConstantHeadingInterpolation(0)
        );
        executePath();
    }

    private void defensivePosition() {
        follower.followPath(
            new BezierLine(new Point(0, 24, Point.CARTESIAN))
                .setConstantHeadingInterpolation(Math.toRadians(90))
        );
        executePath();
    }

    private void executePath() {
        while (!follower.isPathComplete() && opModeIsActive()) {
            robot.update();
            follower.update();
            robot.setDrivePowers(follower.getMotorPowers());
        }
    }

    private void initializeVision() {
        tfod = new TfodProcessor.Builder()
            .setModelAssetName("Model.tflite")
            .build();
        visionPortal = new VisionPortal.Builder()
            .setCamera(hardwareMap.get(WebcamName.class, "Webcam 1"))
            .addProcessor(tfod)
            .build();
    }
}

Example 3: Real-Time Object Tracking

ObjectTrackingAuto.java
@Autonomous(name = "Real-Time Object Tracking")
public class ObjectTrackingAuto extends LinearOpMode {
    Robot robot;
    Follower follower;
    TfodProcessor tfod;
    VisionPortal visionPortal;

    @Override
    public void runOpMode() {
        robot = new Robot(hardwareMap, RobotConfig.class);
        robot.initializeLocalizer(hardwareMap);
        follower = new Follower(hardwareMap);

        initializeVision();
        waitForStart();

        // Continuously track and approach detected objects
        ElapsedTime runtime = new ElapsedTime();

        while (opModeIsActive() && runtime.seconds() < 25) { // stop 5 s before the 30 s auto period ends
            robot.update();

            // Get latest detections
            List<Recognition> objects = tfod.getRecognitions();

            if (objects.size() > 0) {
                // Find closest high-confidence object
                Recognition target = findBestTarget(objects);

                if (target != null) {
                    // Calculate target position
                    Point targetPoint = calculateObjectPosition(target);

                    // Create path to object
                    follower.followPath(
                        new BezierLine(targetPoint)
                            .setTangentHeadingInterpolation()
                            .addDeceleration(0.7, 1.0, 0.3)
                            .setPathEndCallback(() -> {
                                // Attempt to collect object
                                collectObject();
                            })
                    );

                    // Execute approach
                    while (!follower.isPathComplete() && opModeIsActive()) {
                        robot.update();
                        follower.update();
                        robot.setDrivePowers(follower.getMotorPowers());

                        telemetry.addData("Tracking", target.getLabel());
                        telemetry.addData("Confidence", "%.0f%%",
                            target.getConfidence() * 100);
                        telemetry.addData("Progress", "%.0f%%",
                            follower.getPathProgress() * 100);
                        telemetry.update();
                    }

                    sleep(500); // Collect time
                }
            }

            telemetry.addData("Runtime", "%.1f s", runtime.seconds());
            telemetry.update();
        }
    }

    private Recognition findBestTarget(List<Recognition> objects) {
        Recognition best = null;
        double bestScore = 0;

        for (Recognition obj : objects) {
            // Score based on confidence and size
            double score = obj.getConfidence() * obj.getWidth() * obj.getHeight();

            if (score > bestScore) {
                bestScore = score;
                best = obj;
            }
        }

        return (best != null && best.getConfidence() > 0.6) ? best : null;
    }

    private Point calculateObjectPosition(Recognition obj) {
        // Convert screen coordinates to field position
        // This is simplified - real implementation needs camera calibration

        double currentX = robot.getLocalizer().getX();
        double currentY = robot.getLocalizer().getY();

        // Estimate distance based on object size
        double distance = 1000 / obj.getWidth(); // Rough approximation

        // Lateral offset based on X position
        double centerX = (obj.getLeft() + obj.getRight()) / 2;
        double imageCenter = 320; // Half of 640px width
        double lateralOffset = (centerX - imageCenter) / 100;

        return new Point(
            currentX + distance,
            currentY + lateralOffset,
            Point.CARTESIAN
        );
    }

    private void collectObject() {
        // Mechanism control
        telemetry.addLine("Collecting object...");
        telemetry.update();
    }

    private void initializeVision() {
        tfod = new TfodProcessor.Builder()
            .setModelAssetName("ObjectModel.tflite")
            .build();
        visionPortal = new VisionPortal.Builder()
            .setCamera(hardwareMap.get(WebcamName.class, "Webcam 1"))
            .addProcessor(tfod)
            .build();
    }
}
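
The 1000 / obj.getWidth() distance estimate in calculateObjectPosition() is a placeholder. A slightly more principled sketch uses the pinhole-camera relationship distance = realWidth × focalLength / pixelWidth; both constants below are assumptions you would measure for your own camera and game element.

// Sketch: pinhole-camera distance estimate. REAL_WIDTH_INCHES and
// FOCAL_LENGTH_PX are example values -- calibrate by placing the object at a
// known distance and solving focal = pixelWidth * distance / realWidth.
private static final double REAL_WIDTH_INCHES = 3.0;
private static final double FOCAL_LENGTH_PX = 700.0;

private double estimateDistance(Recognition obj) {
    return REAL_WIDTH_INCHES * FOCAL_LENGTH_PX / obj.getWidth();
}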

AI Integration Tips

💡 Train Custom Models

FTC allows custom TensorFlow models. Train on your specific game elements for better detection accuracy than generic models.
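
A sketch of loading a team-trained model from the Robot Controller's storage, using the current FTC SDK's TfodProcessor.Builder calls; the file path and label are placeholders for your own model:

tfod = new TfodProcessor.Builder()
    .setModelFileName("/sdcard/FIRST/tflitemodels/TeamModel.tflite") // placeholder path
    .setModelLabels(new String[] {"TeamProp"})                       // placeholder label
    .setIsModelTensorFlow2(true)
    .build();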

💡 Combine Multiple AI Techniques

Use TensorFlow for object detection, AprilTags for positioning, and OpenCV for color filtering. Each excels at different tasks.
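
For example, a single VisionPortal can run several processors on one camera stream. This sketch pairs the TFOD processor with the SDK's AprilTagProcessor (org.firstinspires.ftc.vision.apriltag):

AprilTagProcessor aprilTag = AprilTagProcessor.easyCreateWithDefaults();

visionPortal = new VisionPortal.Builder()
    .setCamera(hardwareMap.get(WebcamName.class, "Webcam 1"))
    .addProcessors(tfod, aprilTag) // both processors share the same frames
    .build();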

💡 AI Makes Decisions, VingVing Executes

Perfect division of labor: AI vision detects targets and makes strategic decisions, VingVing Robot handles precise path following and positioning.

⚠️ Test Inference Speed

Complex models can be slow. Measure inference time and ensure AI decisions happen fast enough for real-time autonomous operation.
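
One caveat when measuring: tfod.getRecognitions() returns the most recently cached results rather than running inference synchronously, so timing it directly measures your loop's reaction time, not raw model inference. A minimal sketch:

ElapsedTime loopTimer = new ElapsedTime();
while (opModeIsActive()) {
    loopTimer.reset();
    List<Recognition> recognitions = tfod.getRecognitions();
    // ... decision logic and path updates ...
    telemetry.addData("Loop time", "%.0f ms", loopTimer.milliseconds());
    telemetry.update();
}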

The Future is AI + VingVing

Combining computer vision with precise path following creates truly intelligent autonomous robots. VingVing Robot handles the "how to move," allowing you to focus on the "what to do" with AI.

  • Detect game elements with TensorFlow
  • Make strategic decisions with custom logic
  • Execute precise movements with VingVing Robot
  • Adapt in real-time to changing field conditions