Tabnine Logo
Frame$Builder
Code IndexAdd Tabnine to your IDE (free)

How to use
Frame$Builder
in
com.google.android.gms.vision

Best Java code snippets using com.google.android.gms.vision.Frame$Builder (Showing top 17 results out of 315)

origin: googlesamples/android-vision

// Excerpt from a camera frame-processing loop (enclosing method not shown):
// wraps the pending NV21 preview bytes in a vision Frame, tagging it with a
// sequence id, capture timestamp, and rotation so the detector can order and
// orient the frame correctly.
outputFrame = new Frame.Builder()
    .setImageData(mPendingFrameData, mPreviewSize.getWidth(),
        mPreviewSize.getHeight(), ImageFormat.NV21)
    .setId(mPendingFrameId)
    .setTimestampMillis(mPendingTimeMillis)
    .setRotation(mRotation)
    .build();
origin: googlesamples/android-vision

// Build a one-shot Frame directly from a Bitmap and run synchronous face
// detection on it; keys of the SparseArray are stable face-tracking ids.
Frame frame = new Frame.Builder().setBitmap(bitmap).build();
SparseArray<Face> faces = safeDetector.detect(frame);
origin: thegenuinegourav/Questor

// Excerpt from a camera frame-processing loop (enclosing method not shown):
// packages the pending NV21 preview buffer as a Frame with id, timestamp,
// and rotation metadata for downstream detection.
outputFrame = new Frame.Builder()
    .setImageData(mPendingFrameData, mPreviewSize.getWidth(),
        mPreviewSize.getHeight(), ImageFormat.NV21)
    .setId(mPendingFrameId)
    .setTimestampMillis(mPendingTimeMillis)
    .setRotation(mRotation)
    .build();
origin: peekler/GDG

// Excerpt from a camera frame-processing loop (enclosing method not shown):
// packages the pending NV21 preview buffer as a Frame with id, timestamp,
// and rotation metadata for downstream detection.
outputFrame = new Frame.Builder()
    .setImageData(mPendingFrameData, mPreviewSize.getWidth(),
        mPreviewSize.getHeight(), ImageFormat.NV21)
    .setId(mPendingFrameId)
    .setTimestampMillis(mPendingTimeMillis)
    .setRotation(mRotation)
    .build();
origin: EzequielAdrianM/Camera2Vision

// Excerpt (enclosing method not shown): downsamples the NV21 preview buffer to
// quarter resolution via quarterNV21 before wrapping it in a Frame — presumably
// to cut detection cost; width/height are divided by 4 to match. Rotation comes
// from the sensor orientation rather than a cached field.
outputFrame = new Frame.Builder()
    .setImageData(ByteBuffer.wrap(quarterNV21(mPendingFrameData, mPreviewSize.getWidth(), mPreviewSize.getHeight())), mPreviewSize.getWidth()/4, mPreviewSize.getHeight()/4, ImageFormat.NV21)
    .setId(mPendingFrameId)
    .setTimestampMillis(mPendingTimeMillis)
    .setRotation(getDetectorOrientation(mSensorOrientation))
    .build();
origin: adafruit/Bluefruit_LE_Connect_Android

// Excerpt from a camera frame-processing loop (enclosing method not shown):
// packages the pending NV21 preview buffer as a Frame with id, timestamp,
// and rotation metadata for downstream detection.
outputFrame = new Frame.Builder()
    .setImageData(mPendingFrameData, mPreviewSize.getWidth(),
        mPreviewSize.getHeight(), ImageFormat.NV21)
    .setId(mPendingFrameId)
    .setTimestampMillis(mPendingTimeMillis)
    .setRotation(mRotation)
    .build();
origin: EzequielAdrianM/Camera2Vision

// Excerpt (enclosing method not shown): quarter-resolution variant.
// NOTE(review): previewW is declared above this excerpt — not visible here.
int previewH = mPreviewSize.getHeight();
outputFrame = new Frame.Builder()
    .setImageData(quarterNV21(mPendingFrameData, previewW, previewH), previewW/4, previewH/4, ImageFormat.NV21)
    .setId(mPendingFrameId)
    .setTimestampMillis(mPendingTimeMillis)
    .setRotation(mRotation)
    .build();
origin: TrustWallet/trust-wallet-android-source

// Excerpt from a camera frame-processing loop (enclosing method not shown):
// packages the pending NV21 preview buffer as a Frame with id, timestamp,
// and rotation metadata for downstream detection.
outputFrame = new Frame.Builder()
    .setImageData(mPendingFrameData, mPreviewSize.getWidth(),
        mPreviewSize.getHeight(), ImageFormat.NV21)
    .setId(mPendingFrameId)
    .setTimestampMillis(mPendingTimeMillis)
    .setRotation(mRotation)
    .build();
origin: EdwardvanRaak/MaterialBarcodeScanner

// Excerpt from a camera frame-processing loop (enclosing method not shown):
// packages the pending NV21 preview buffer as a Frame with id, timestamp,
// and rotation metadata for downstream detection.
outputFrame = new Frame.Builder()
    .setImageData(mPendingFrameData, mPreviewSize.getWidth(),
        mPreviewSize.getHeight(), ImageFormat.NV21)
    .setId(mPendingFrameId)
    .setTimestampMillis(mPendingTimeMillis)
    .setRotation(mRotation)
    .build();
origin: komamitsu/Android-OCRSample

// Excerpt (enclosing method not shown): wrap a Bitmap in a Frame and run OCR;
// the detected TextBlocks are copied into a mutable list below.
Frame frame = new Frame.Builder().setBitmap(bitmap).build();
SparseArray<TextBlock> origTextBlocks = textRecognizer.detect(frame);
List<TextBlock> textBlocks = new ArrayList<>();
origin: aryarohit07/PicassoFaceDetectionTransformation

/**
 * Calculates a point (focus point) in the bitmap, around which cropping needs to be performed.
 *
 * <p>If the face detector is not operational (e.g. its native dependencies have not
 * been downloaded yet) or no faces are found, the bitmap's geometric center is used
 * so the caller falls back to a plain center crop.
 *
 * @param bitmap           Bitmap in which faces are to be detected.
 * @param centerOfAllFaces To store the center point.
 */
private void detectFace(Bitmap bitmap, PointF centerOfAllFaces) {
  FaceDetector faceDetector = PicassoFaceDetector.getFaceDetector();
  if (faceDetector.isOperational()) {
    Frame frame = new Frame.Builder().setBitmap(bitmap).build();
    SparseArray<Face> faces = faceDetector.detect(frame);
    final int totalFaces = faces.size();
    if (totalFaces > 0) {
      float sumX = 0f;
      float sumY = 0f;
      for (int i = 0; i < totalFaces; i++) {
        PointF faceCenter = new PointF();
        getFaceCenter(faces.get(faces.keyAt(i)), faceCenter);
        sumX += faceCenter.x;
        sumY += faceCenter.y;
      }
      // Focus point is the average of the individual face centers.
      centerOfAllFaces.set(sumX / totalFaces, sumY / totalFaces);
      return;
    }
  }
  // Fallback: true geometric center. Float division avoids the half-pixel bias
  // the original integer division (width / 2) introduced for odd dimensions.
  centerOfAllFaces.set(bitmap.getWidth() / 2f, bitmap.getHeight() / 2f); // center crop
}
origin: aryarohit07/GlideFaceDetectionTransformation

/**
 * Calculates a point (focus point) in the bitmap, around which cropping needs to be performed.
 *
 * <p>If the face detector is not operational (e.g. its native dependencies have not
 * been downloaded yet) or no faces are found, the bitmap's geometric center is used
 * so the caller falls back to a plain center crop.
 *
 * @param bitmap           Bitmap in which faces are to be detected.
 * @param centerOfAllFaces To store the center point.
 */
private void detectFace(Bitmap bitmap, PointF centerOfAllFaces) {
  FaceDetector faceDetector = GlideFaceDetector.getFaceDetector();
  if (faceDetector.isOperational()) {
    Frame frame = new Frame.Builder().setBitmap(bitmap).build();
    SparseArray<Face> faces = faceDetector.detect(frame);
    final int totalFaces = faces.size();
    if (totalFaces > 0) {
      float sumX = 0f;
      float sumY = 0f;
      for (int i = 0; i < totalFaces; i++) {
        PointF faceCenter = new PointF();
        getFaceCenter(faces.get(faces.keyAt(i)), faceCenter);
        sumX += faceCenter.x;
        sumY += faceCenter.y;
      }
      // Focus point is the average of the individual face centers.
      centerOfAllFaces.set(sumX / totalFaces, sumY / totalFaces);
      return;
    }
  }
  // Fallback: true geometric center. Float division avoids the half-pixel bias
  // the original integer division (width / 2) introduced for odd dimensions.
  centerOfAllFaces.set(bitmap.getWidth() / 2f, bitmap.getHeight() / 2f); // center crop
}
origin: PaulTR/AndroidDemoProjects

/**
 * Runs face detection on the given bitmap and stores the results for rendering.
 *
 * <p>The detector is now released on every path (the original only released it
 * when it was operational, leaking native detector resources otherwise).
 *
 * @param bitmap image to run face detection on; also cached in {@code mBitmap}
 */
public void setBitmap( Bitmap bitmap ) {
  mBitmap = bitmap;
  FaceDetector detector = new FaceDetector.Builder( getContext() )
      .setTrackingEnabled(true)
      .setLandmarkType(FaceDetector.ALL_LANDMARKS)
      .setMode(FaceDetector.ACCURATE_MODE)
      .build();
  try {
    if (detector.isOperational()) {
      Frame frame = new Frame.Builder().setBitmap(bitmap).build();
      mFaces = detector.detect(frame);
    }
    // else: detector dependencies not yet available — keep previously detected faces.
  } finally {
    detector.release(); // always free the detector's native resources
  }
  logFaceData();
  invalidate();
}
origin: googlesamples/android-vision

// Excerpt from a camera frame-processing loop (enclosing method not shown):
// packages the pending NV21 preview buffer as a Frame with id, timestamp,
// and rotation metadata for downstream detection.
outputFrame = new Frame.Builder()
    .setImageData(mPendingFrameData, previewSize.getWidth(),
        previewSize.getHeight(), ImageFormat.NV21)
    .setId(mPendingFrameId)
    .setTimestampMillis(mPendingTimeMillis)
    .setRotation(rotation)
    .build();
origin: googlesamples/android-vision

// Excerpt from a camera frame-processing loop (enclosing method not shown):
// packages the pending NV21 preview buffer as a Frame with id, timestamp,
// and rotation metadata for downstream detection.
outputFrame = new Frame.Builder()
    .setImageData(mPendingFrameData, mPreviewSize.getWidth(),
        mPreviewSize.getHeight(), ImageFormat.NV21)
    .setId(mPendingFrameId)
    .setTimestampMillis(mPendingTimeMillis)
    .setRotation(mRotation)
    .build();
origin: googlesamples/android-vision

// Excerpt (enclosing method not shown): rebuild a Frame around a height-padded
// buffer, carrying over id/rotation/timestamp from the original frame's metadata.
return new Frame.Builder()
    .setImageData(paddedBuffer, width, newHeight, ImageFormat.NV21)
    .setId(metadata.getId())
    .setRotation(metadata.getRotation())
    .setTimestampMillis(metadata.getTimestampMillis())
    .build();
origin: googlesamples/android-vision

/**
 * Creates a new frame based on the original frame, with additional width on the right to
 * increase the size to avoid the bug in the underlying face detector.
 */
private Frame padFrameRight(Frame originalFrame, int newWidth) {
  Frame.Metadata metadata = originalFrame.getMetadata();
  int width = metadata.getWidth();
  int height = metadata.getHeight();
  Log.i(TAG, "Padded image from: " + width + "x" + height + " to " + newWidth + "x" + height);
  ByteBuffer origBuffer = originalFrame.getGrayscaleImageData();
  // arrayOffset() matters: the backing array may start before the buffer's first element.
  int origOffset = origBuffer.arrayOffset();
  // NOTE(review): array() on a ByteBuffer only works when hasArray() is true; this
  // appears to rely on the Android runtime backing these buffers with a byte[] —
  // on other JVMs a direct buffer's array() throws. TODO confirm on target platform.
  byte[] origBytes = origBuffer.array();
  // This can be changed to just .allocate in the future, when Frame supports non-direct
  // byte buffers.
  ByteBuffer paddedBuffer = ByteBuffer.allocateDirect(newWidth * height);
  int paddedOffset = paddedBuffer.arrayOffset();
  byte[] paddedBytes = paddedBuffer.array();
  // Zero-fill so the new right-hand margin is uniformly black, not uninitialized.
  Arrays.fill(paddedBytes, (byte) 0);
  // Copy each source row into the wider destination row; trailing columns stay 0.
  for (int y = 0; y < height; ++y) {
    int origStride = origOffset + y * width;
    int paddedStride = paddedOffset + y * newWidth;
    System.arraycopy(origBytes, origStride, paddedBytes, paddedStride, width);
  }
  // Rebuild the frame with the padded luminance plane, carrying over the original
  // frame's id, rotation, and timestamp metadata.
  return new Frame.Builder()
      .setImageData(paddedBuffer, newWidth, height, ImageFormat.NV21)
      .setId(metadata.getId())
      .setRotation(metadata.getRotation())
      .setTimestampMillis(metadata.getTimestampMillis())
      .build();
}
com.google.android.gms.vision.Frame$Builder

Most used methods

  • <init>
  • build
  • setId
  • setImageData
  • setRotation
  • setTimestampMillis
  • setBitmap

Popular in Java

  • Creating JSON documents from java classes using gson
  • setRequestProperty (URLConnection)
  • getSupportFragmentManager (FragmentActivity)
  • findViewById (Activity)
  • Kernel (java.awt.image)
  • BufferedWriter (java.io)
    Wraps an existing Writer and buffers the output. Expensive interaction with the underlying reader is
  • Date (java.sql)
    A class which can consume and produce dates in SQL Date format. Dates are represented in SQL as yyyy-MM-dd.
  • Options (org.apache.commons.cli)
    Main entry-point into the library. Options represents a collection of Option objects, which describe the possible options for a command line.
  • Logger (org.apache.log4j)
    This is the central class in the log4j package. Most logging operations, except configuration, are d
  • Logger (org.slf4j)
    The org.slf4j.Logger interface is the main user entry point of SLF4J API. It is expected that loggin
  • Top 12 Jupyter Notebook extensions
Tabnine Logo
  • Products

    Search for Java codeSearch for JavaScript code
  • IDE Plugins

    IntelliJ IDEAWebStormVisual StudioAndroid StudioEclipseVisual Studio CodePyCharmSublime TextPhpStormVimGoLandRubyMineEmacsJupyter NotebookJupyter LabRiderDataGripAppCode
  • Company

    About UsContact UsCareers
  • Resources

    FAQBlogTabnine AcademyTerms of usePrivacy policyJava Code IndexJavascript Code Index
Get Tabnine for your IDE now