Source: tfsdk.js

"use strict";

const FFI = require("ffi-napi");
const Ref = require("ref-napi");
const ArrayType = require("ref-array-di")(Ref);
const StructType = require("ref-struct-di")(Ref);

// Why does this have to be a top-level function?
// const cleanUpTfImage = new FinalizationRegistry((o) => {
//   console.log("cleanUpTfImage called");
//   o.sdk.TFSDK__destroy_TF_Image(o.pointer);
// });

class Image {
  errorCode = -1;
  // Constructed by TFSDK.preprocessImageFromFile(); `sdk` is the owning TFSDK
  // instance and `image` is the ref-napi buffer holding the native TF_Image pointer.
  constructor(sdk, image) {
    this.sdk = sdk;
    this.tfimage = image;
  }
  get pointer() {
    return this.tfimage.deref();
  }
  deref() {
    return this.tfimage.deref();
  }
  save(path) {
    return this.sdk.saveImage(this, path);
  }
  rotate(degree) {
    return this.sdk.rotateImage(this, degree);
  }
  destroy() {
    return this.sdk.destroyImage(this);
  }
}

class TFSDKDataChecker {
  constructor() {
    this.exception = false;
    this.consoleError = false;
  }
  throw(message) {
    if (this.exception) throw new Error(message);
    if (this.consoleError) console.error(message);
    return 6; // index of "FAILED" in the ErrorCode list used by getError()
  }
  isString(value) {
    return typeof value === "string" || value instanceof String;
  }
  isNumber(value) {
    return typeof value === "number" && isFinite(value);
  }
  isArray(value) {
    return value && typeof value === "object" && value.constructor === Array;
  }
  isObject(value) {
    return value && typeof value === "object" && value.constructor === Object;
  }
  isNull(value) {
    return value === null;
  }
  isUndefined(value) {
    return typeof value === "undefined";
  }
  isBoolean(value) {
    return typeof value === "boolean";
  }
}

module.exports = class TFSDK {
  constructor(options = {}, libPath = "./artifacts/") {
    this.dc = new TFSDKDataChecker();

    this.uint = Ref.types.uint;
    this.int = Ref.types.int;
    this.IntArray = ArrayType(this.int);

    const libType = "void";
    const libPtr = Ref.refType(libType);

    this.imageType = "void";
    this.imagePtr = Ref.refType(this.imageType);

    this.imageProperties = StructType({
      width: Ref.types.uint,
      height: Ref.types.uint,
      channels: Ref.types.uint,
      isGpuImage: Ref.types.bool,
    });
    const ImagePropertiesPtr = Ref.refType(this.imageProperties);

    this.lib = FFI.Library(`${libPath}/libtfsdk`, {
      TFSDK__create: [libPtr, []],
      TFSDK__json_createWithConfigurationOptions: [libPtr, ["string"]],
      TFSDK__destory: ["void", [libPtr]],

      TFSDK__setLicense: ["bool", [libPtr, "string"]],
      TFSDK__isLicensed: ["bool", [libPtr, Ref.refType(this.uint)]],
      TFSDK__getVersion: ["double", [libPtr]],

      TFSDK__preprocessImageFromFile: [
        "int",
        [libPtr, this.imagePtr, "string"],
      ],
      TFSDK__destroy_TF_Image: ["void", [Ref.refType(this.imagePtr)]],
      TFSDK__saveImage: ["void", [this.imagePtr, "string"]],
      TFSDK__rotateImage: ["void", [this.imagePtr, Ref.types.double]],

      // TFSDK__setImage: ["int", [libPtr, this.IntArray, "int", "int", "int"]],
      // TFSDK__setImageFromFile: ["int", [libPtr, "string"]],
      TFSDK__estimateFaceImageQuality: [this.IntArray, [libPtr, "double"]],

      // Argument order (tfImage first, then the SDK handle) matches the call
      // sites below and the C prototype shown for detectLargestFace.
      TFSDK__json_detectObjects: ["string", [this.imagePtr, libPtr]],
      // const char *TFSDK__json_detectLargestFace(const struct TF__Image* tfImage, struct Trueface__SDK *tfsdk);
      TFSDK__json_detectLargestFace: ["string", [this.imagePtr, libPtr]],
      TFSDK__json_detectFaces: ["string", [this.imagePtr, libPtr]],
      TFSDK__json_getLargestFaceFeatureVector: [
        "string",
        [this.imagePtr, libPtr],
      ],
      TFSDK__json_getSimilarity: ["string", [libPtr, "string", "string"]],
      TFSDK__json_estimateHeadOrientation: [
        "string",
        [this.imagePtr, libPtr, "string"],
      ],
      // TFSDK__json_detectBlink: ["string", [libPtr]],
      TFSDK__json_getFaceLandmarks: [
        "string",
        [this.imagePtr, libPtr, "string"],
      ],
      TFSDK__createDatabaseConnection: ["int", [libPtr, "string"]],
      TFSDK__createLoadCollection: ["int", [libPtr, "string"]],
      TFSDK__json_enrollFaceprint: ["string", [libPtr, "string", "string"]],
      TFSDK__removeByUUID: ["int", [libPtr, "string"]],
      TFSDK__removeByIdentity: ["int", [libPtr, "string"]],
      TFSDK__json_identifyTopCandidate: [
        "string",
        [libPtr, "string", Ref.types.float],
      ],
      TFSDK__getFaceLandmarks: ["string", [this.imagePtr, libPtr, "string"]],
      // TFSDK__json_getFaceLandmarks: ["string", [libPtr, "string"]],

      TFSDK__json_getFaceFeatureVector: [
        "string",
        [this.imagePtr, libPtr, "string"],
      ],
      TFSDK__detectActiveSpoof: ["string", [libPtr, "string", "string"]],
      TFSDK__checkSpoofImageFaceSize: [
        "string",
        [this.imagePtr, libPtr, "string", "string"],
      ],
      TFSDK__json_detectMask: ["string", [this.imagePtr, libPtr, "string"]],
      TFSDK__json_detectGlasses: ["string", [this.imagePtr, libPtr, "string"]],
      TFSDK__extractAlignedFace: [
        this.IntArray,
        [this.imagePtr, libPtr, "string"],
      ],
    });

    if (!options || Object.keys(options).length === 0) {
      this.ptr = this.lib.TFSDK__create();
    } else {
      this.ptr = this.lib.TFSDK__json_createWithConfigurationOptions(
        JSON.stringify(options)
      );
    }
  }
  getColorSpace(bitmap) {
    if (!this.dc.isObject(bitmap) || !this.dc.isBoolean(bitmap._rgba))
      return this.dc.throw(
        `bitmap: must be an object with a boolean _rgba property, ${bitmap} was given`
      );
    if (bitmap._rgba === true) {
      return 3;
    } else {
      return 0;
    }
  }
  /**
   * Return the text description of the error code
   * @function getError
   * @param {number} error the error code returned by an SDK call
   * @returns {string} the name of the error code (e.g. "NO_ERROR").
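   * @example
   * // Minimal sketch: map a numeric error code to its name.
   * // Assumes `sdk` is a constructed TFSDK instance.
   * sdk.getError(0); // "NO_ERROR"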
   */
  getError(error) {
    if (!this.dc.isNumber(error))
      return this.dc.throw(`error: must be a number, ${error} was given`);
    const ErrorCode = [
      "NO_ERROR",
      "INVALID_LICENSE",
      "FILE_READ_FAIL",
      "UNSUPPORTED_IMAGE_FORMAT",
      "UNSUPPORTED_MODEL",
      "NO_FACE_IN_FRAME",
      "FAILED",
      "COLLECTION_CREATION_ERROR",
      "DATABASE_CONNECTION_ERROR",
      "ENROLLMENT_ERROR",
      "MAX_COLLECTION_SIZE_EXCEEDED",
      "NO_RECORD_FOUND",
      "NO_COLLECTION_FOUND",
      "COLLECTION_DELETION_ERROR",
      "EXTREME_FACE_ANGLE",
      "FACE_TOO_CLOSE",
      "FACE_TOO_FAR",
    ];
    return ErrorCode[error];
  }

  /**
   * Sets and validates the given license token. Need to call this method
   * before being able to use the SDK.
   * @function setLicense
   *
   * @param {string} license the license token (if you do not have this talk to
   *        support@trueface.ai).
   * @returns {boolean} Whether the given license token is valid.
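   * @example
   * // Minimal sketch, assuming libtfsdk sits in ./artifacts and the
   * // TRUEFACE_TOKEN environment variable holds a valid license token
   * // (both are assumptions, not part of this file).
   * const TFSDK = require("./tfsdk");
   * const sdk = new TFSDK();
   * if (!sdk.setLicense(process.env.TRUEFACE_TOKEN)) {
   *   throw new Error("Invalid Trueface license token");
   * }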
   */
  setLicense(license) {
    if (!this.dc.isString(license))
      return this.dc.throw(`license: must be a string, ${license} was given`);
    return this.lib.TFSDK__setLicense(this.ptr, license);
  }
  /**
   * Checks whether the given license token is valid and you can use
   * the SDK.
   *
   * @function isLicensed
   * @returns {boolean} Whether the given license token is valid.
   */
  isLicensed() {
    const remainingDays = Ref.alloc(this.uint);
    const isLicensed = this.lib.TFSDK__isLicensed(this.ptr, remainingDays);
    return isLicensed;
  }
  getRemainingDays() {
    const remainingDays = Ref.alloc(this.uint);
    this.lib.TFSDK__isLicensed(this.ptr, remainingDays);
    return remainingDays.deref();
  }
  /**
   * Gets the version-build number of the SDK.
   * @function getVersion
   *
   * @returns {number} the version number (the native call returns a double).
   */
  getVersion() {
    return this.lib.TFSDK__getVersion(this.ptr);
  }
  /**
   * Pre-process image for SDK
   * @function preprocessImageFromFile
   *
   * @param {string} path the image file path
   * @returns {Image} an Image wrapper around the pre-processed native image;
   * its errorCode property holds the result code, see {@link getError}.
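   * @example
   * // Sketch: load an image and check the error code before using it.
   * // "photo.jpg" is a placeholder path (assumption).
   * const img = sdk.preprocessImageFromFile("photo.jpg");
   * if (sdk.getError(img.errorCode) !== "NO_ERROR") {
   *   throw new Error(`preprocess failed: ${sdk.getError(img.errorCode)}`);
   * }
   * // ... use img, then release the native memory:
   * sdk.destroyImage(img);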
   */
  preprocessImageFromFile(path) {
    if (!this.dc.isString(path))
      return this.dc.throw(`path: must be a string, ${path} was given`);
    const image = Ref.alloc(this.imagePtr);
    const errorCode = this.lib.TFSDK__preprocessImageFromFile(
      this.ptr,
      image,
      path
    );
    const tfimage = new Image(this, image);
    tfimage.errorCode = errorCode;
    // cleanUpTfImage.register(tfimage, {
    //   sdk: this.lib,
    //   pointer: image.pointer
    // });
    return tfimage;
  }
  /**
   * Destroy image which is no longer needed for SDK, this is to conserve memory usage
   * @function destroyImage
   *
   * @param {Image} image the Image returned by preprocessImageFromFile()
   */
  destroyImage(image) {
    if (!Buffer.isBuffer(image.tfimage)) {
      return this.dc.throw(`image: must be a buffer, ${image.tfimage} was given`);
    }
    this.lib.TFSDK__destroy_TF_Image(image.tfimage);
  }
  /**
   * Rotate a pre-processed image by the given angle in degrees
   * @function rotateImage
   *
   * @param {buffer} image the pre-processed image in buffer
   * @param {number} rotAngleDegrees degree in number which the image rotates
   * 
   */
  rotateImage(image, rotAngleDegrees) {
    // check for image, double types
    if (!Buffer.isBuffer(image.tfimage)) {
      return this.dc.throw(`image: must be a buffer, ${image.tfimage} was given`);
    }
    if (!this.dc.isNumber(rotAngleDegrees)) {
      return this.dc.throw(
        `rotAngleDegrees: must be a number, ${rotAngleDegrees} was given`
      );
    }
    return this.lib.TFSDK__rotateImage(image.tfimage.deref(), rotAngleDegrees);
  }
  /**
   * Save the buffer image to disk
   * @function saveImage
   *
   * @param {buffer} image the pre-processed image in buffer
   * @param {string} path the destination file path on disk
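   * @example
   * // Sketch: rotate a pre-processed image and write it back to disk.
   * // File names are placeholders (assumptions).
   * const img = sdk.preprocessImageFromFile("input.jpg");
   * sdk.rotateImage(img, 90);
   * sdk.saveImage(img, "rotated.jpg");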
   * 
   */
  saveImage(image, path) {
    // check for image, path types
    if (!Buffer.isBuffer(image.tfimage)) {
      return this.dc.throw(`image: must be a buffer, ${image.tfimage} was given`);
    }
    if (!this.dc.isString(path)) {
      return this.dc.throw(`path: must be a string, ${path} was given`);
    }
    return this.lib.TFSDK__saveImage(image.tfimage.deref(), path);
  }
  /**
   * Detect and identify all the objects in the image.
   * @function detectObjects
   *
   * @param {buffer} image the pre-processed image in buffer
   * @returns {object} errorCode, see ErrorCode.
   *
   * @returns {object} array of object description, object location in the image.
   */
  detectObjects(image) {
    if (!Buffer.isBuffer(image.tfimage)) {
      return this.dc.throw(`image: must be a buffer, ${image.tfimage} was given`);
    }
    const result = this.lib.TFSDK__json_detectObjects(image.tfimage.deref(), this.ptr);
    return JSON.parse(result);
  }
  /**
   * Detect all the faces in the image. This method has a small false positive rate.
   * To reduce the false positive rate to near zero, filter out faces with score lower than 0.90.
   * Alternatively, you can use the `Trueface::FaceDetectionFilter`
   * configuration option to filter the detected faces.
   *
   * The face detector has a detection scale range of about 5 octaves. ConfigurationOptions.smallestFaceHeight
   * determines the lower end of the detection scale range. E.g., setting ConfigurationOptions.smallestFaceHeight to
   * 40 pixels yields a detection scale range of ~40 pixels to 1280 (= 40 x 2^5) pixels.
   *
   * @function detectFaces
   *
   * @param {buffer} image the pre-processed image in buffer
   *
   * @returns {object} faceBoxAndLandmarks an array of FaceBoxAndLandmarks objects, one for each detected face.
   * If no faces are found, the array will be empty. The detected faces are sorted in order of descending face score.
   *
   * @returns {object} error code, see ErrorCode.
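   * @example
   * // Sketch: detect all faces and keep only high-confidence detections.
   * // The JSON field names (faceBoxAndLandmarks, score) follow the native
   * // library's output and are assumptions here.
   * const result = sdk.detectFaces(img);
   * const faces = (result.faceBoxAndLandmarks || []).filter((f) => f.score > 0.9);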
   */
  detectFaces(image) {
    if (!Buffer.isBuffer(image.tfimage)) {
      return this.dc.throw(`image: must be a buffer, ${image.tfimage} was given`);
    }
    const result = this.lib.TFSDK__json_detectFaces(image.tfimage.deref(), this.ptr);
    return JSON.parse(result);
  }
  /**
   * Detect the largest face in the image.
   * This method has a small false positive rate.
   * To reduce the false positive rate to near zero, filter out faces with score lower than 0.90.
   * Alternatively, you can use the `Trueface::FaceDetectionFilter`
   * configuration option to filter the detected faces.
   * See detectFaces() for the detection scale range.
   *
   * @function detectLargestFace
   *
   * @param {buffer} image the pre-processed image in buffer
   *
   * @returns {object} faceBoxAndLandmarks the FaceBoxAndLandmarks containing the landmarks and bounding box
   * of the largest detected face in the image.
   * @returns {boolean} found whether a face was found in the image.
   *
   * @returns {object} error code, see ErrorCode.
   */
  detectLargestFace(image) {
    if (!Buffer.isBuffer(image.tfimage)) {
      return this.dc.throw(`image: must be a buffer, ${image.tfimage} was given`);
    }
    const result = this.lib.TFSDK__json_detectLargestFace(
      image.tfimage.deref(),
      this.ptr
    );
    return JSON.parse(result);
  }
  /**
   * Detect the largest face in the image and return its feature
   * vector.
   *
   * @function getLargestFaceFeatureVector
   *
   * @param {buffer} image the pre-processed image in buffer
   *
   * @returns {object} Faceprint object which will contain
   *             the face feature vector.
   * @returns {boolean} foundFace indicates if a face was detected in the image.
   * If no face was detected, then the faceprint will be empty.
   *
   * @returns {object} error code, see ErrorCode{@link getError}.
   */
  getLargestFaceFeatureVector(image) {
    if (!Buffer.isBuffer(image.tfimage)) {
      return this.dc.throw(`image: must be a buffer, ${image.tfimage} was given`);
    }
    const result = this.lib.TFSDK__json_getLargestFaceFeatureVector(
      image.tfimage.deref(),
      this.ptr
    );
    return JSON.parse(result);
  }
  /**
   * Compute the similarity between two feature vectors, or how
   * similar two faces are.
   *
   * @function getSimilarity
   * @param {object} faceprint1 the first Faceprint to be compared.
   * @param {object} faceprint2 the second Faceprint to be compared.
   * @returns {number} matchProbability the probability the two face feature vectors are a match.
   * @returns {number} similarityMeasure the computed similarity measure.
   *
   * @returns error code, see ErrorCode.
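   * @example
   * // Sketch: compare the largest faces from two pre-processed images.
   * // The field names (faceprint, matchProbability, similarityMeasure) follow
   * // the native JSON output and are assumptions here.
   * const a = sdk.getLargestFaceFeatureVector(img1);
   * const b = sdk.getLargestFaceFeatureVector(img2);
   * const { matchProbability, similarityMeasure } = sdk.getSimilarity(a.faceprint, b.faceprint);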
   */
  getSimilarity(faceprint1, faceprint2) {
    if (!this.dc.isObject(faceprint1))
      return this.dc.throw(
        `faceprint1: must be an object, ${faceprint1} was given`
      );
    if (!this.dc.isObject(faceprint2))
      return this.dc.throw(
        `faceprint2: must be an object, ${faceprint2} was given`
      );
    const result = this.lib.TFSDK__json_getSimilarity(
      this.ptr,
      JSON.stringify(faceprint1),
      JSON.stringify(faceprint2)
    );
    return JSON.parse(result);
  }
  /**
   * Estimate the head pose.
   *
   * @function estimateHeadOrientation
   *
   * @param {buffer} image the pre-processed image in buffer
   * @param {object} faceBoxAndLandmarks FaceBoxAndLandmarks returned by detectFaces() or detectLargestFace().
   *
   * @returns {object} <b>yaw</b> the rotation angle around the image's vertical axis, in radians. <b>pitch</b> the rotation angle around the image's transverse axis, in radians. <b>roll</b> the rotation angle around the image's longitudinal axis, in radians.
   * @returns {object} error code, see ErrorCode.
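   * @example
   * // Sketch: estimate yaw/pitch/roll for the largest detected face.
   * // Field names follow the native JSON output and are assumptions here.
   * const det = sdk.detectLargestFace(img);
   * const pose = sdk.estimateHeadOrientation(img, det.faceBoxAndLandmarks);
   * console.log(pose.yaw, pose.pitch, pose.roll);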
   */
  estimateHeadOrientation(image, face) {
    if (!Buffer.isBuffer(image.tfimage)) {
      return this.dc.throw(`image: must be a buffer, ${image.tfimage} was given`);
    }
    if (!this.dc.isObject(face))
      return this.dc.throw(`face: must be an object, ${face} was given`);
    const result = this.lib.TFSDK__json_estimateHeadOrientation(
      image.tfimage.deref(),
      this.ptr,
      JSON.stringify(face)
    );
    return JSON.parse(result);
  }
  /*
  detectBlink() {
    const result = this.lib.TFSDK__json_detectBlink(this.ptr);
    return JSON.parse(result);
  }
  */
  /**
   *
   * Create a connection to a new or existing database.
   * If the database does not exist, a new one will be created with the provided name.
   * If the `Trueface::DatabaseManagementSystem::NONE` memory only
   * configuration option is selected, this function does not need to be called (and is a harmless no-op).
   *
   *
   * If `Trueface::DatabaseManagementSystem::SQLITE` is selected, this should be the filepath to the database.
   * ex. "/myPath/myDatabase.db".
   * If `Trueface::DatabaseManagementSystem::POSTGRESQL`
   * is selected, this should be a database connection string.
   * <a href="https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-PARAMKEYWORDS">Here</a>
   * is a list of all supported PostgreSQL connection parameters.
   * @function createDatabaseConnection
   * @example
   * "hostaddr=192.168.1.0 port=5432 dbname=face_recognition user=postgres password=my_password"
   * @example
   * "host=localhost port=5432 dbname=face_recognition user=postgres password=m_password"
   * To enable ssl, add "sslmode=require" to the connection string.
   *
   * @param {string} databaseConnectionString
   *
   * @returns {object} error code, see ErrorCode.
   */
  createDatabaseConnection(databaseConnectionString) {
    if (!this.dc.isString(databaseConnectionString))
      return this.dc.throw(
        `databaseConnectionString: must be a string, ${databaseConnectionString} was given`
      );
    return this.lib.TFSDK__createDatabaseConnection(
      this.ptr,
      databaseConnectionString
    );
  }
  /**
   * Create a new collection, or load data from an existing collection into memory (RAM) if one with the
   * provided name already exists in the database.
   * Equivalent to calling createCollection() then loadCollection().
   *
   * @function createLoadCollection
   * @param {string} collectionName the name of the collection.
   *
   * @returns {object} error code, see ErrorCode.
   */
  createLoadCollection(collectionName) {
    if (!this.dc.isString(collectionName))
      return this.dc.throw(
        `collectionName: must be a string, ${collectionName} was given`
      );
    return this.lib.TFSDK__createLoadCollection(this.ptr, collectionName);
  }
  /**
   * Enroll a Faceprint for a new or existing identity in the collection.
   *
   * @function enrollFaceprint
   * @param {object} faceprint the Faceprint to enroll in the collection.
   * @param {string} identity the identity corresponding to the Faceprint.
   * @returns {string} UUID universally unique identifier corresponding to the Faceprint.
   *
   * @returns {object} error code, see ErrorCode.
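   * @example
   * // Sketch of a full enrollment flow. The database path, collection name,
   * // identity, and the faceprint field name are assumptions; the SQLite path
   * // form assumes the SDK was configured for a SQLite database.
   * sdk.createDatabaseConnection("/tmp/faces.db");
   * sdk.createLoadCollection("employees");
   * const fv = sdk.getLargestFaceFeatureVector(img);
   * const enrolled = sdk.enrollFaceprint(fv.faceprint, "alice");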
   */
  enrollFaceprint(faceprint, identity) {
    if (!this.dc.isObject(faceprint))
      return this.dc.throw(
        `faceprint: must be a object, ${faceprint} was given`
      );
    if (!this.dc.isString(identity))
      return this.dc.throw(`identity: must be a string, ${identity} was given`);
    const result = this.lib.TFSDK__json_enrollFaceprint(
      this.ptr,
      JSON.stringify(faceprint),
      identity
    );
    return JSON.parse(result);
  }
  /**
   * Remove a Faceprint from the collection using the UUID.
   *
   * @function removeByUUID
   * @param {string} UUID the universally unique identifier returned by enrollFaceprint().
   *
   * @returns {object} error code, see ErrorCode.
   */
  removeByUUID(UUID) {
    if (!this.dc.isString(UUID))
      return this.dc.throw(`UUID: must be a string, ${UUID} was given`);
    return this.lib.TFSDK__removeByUUID(this.ptr, UUID);
  }
  /**
   * Remove all Faceprints in the collection corresponding to the identity.
   *
   * @function removeByIdentity
   * @param {string} identity the identity to remove from the collection.
   * @returns {number} numFaceprintsRemoved the number of Faceprints removed for that identity.
   *
   * @returns {object} error code, see ErrorCode.
   */
  removeByIdentity(identity) {
    if (!this.dc.isString(identity))
      return this.dc.throw(`identity: must be a string, ${identity} was given`);
    return this.lib.TFSDK__removeByIdentity(this.ptr, identity);
  }
  /**
   * Get the top match Candidate in the collection and the corresponding similarity score and match probability.
   *
   * @function identifyTopCandidate
   * @param {object} faceprint the Faceprint to be identified.
   * @param {number} threshold the similarity score threshold above which it is considered a match.
   * Higher thresholds may result in faster queries.
   * Refer to our <a href="https://docs.trueface.ai/roc-curves">ROC curves</a> when selecting a threshold.
   *
   * @returns {string} candidate the top match Candidate.
   * @returns {boolean} found set to true if a match is found.
   *
   * @returns {object} error code, see ErrorCode.
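   * @example
   * // Sketch: 1:N search against the loaded collection. The field names
   * // (faceprint, found, candidate) are assumptions about the native JSON output.
   * const probe = sdk.getLargestFaceFeatureVector(img);
   * const match = sdk.identifyTopCandidate(probe.faceprint, 0.3);
   * if (match.found) console.log("Top candidate:", match.candidate);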
   */
  identifyTopCandidate(faceprint, threshold = 0.3) {
    if (!this.dc.isObject(faceprint))
      return this.dc.throw(
        `faceprint: must be an object, ${faceprint} was given`
      );
    if (!this.dc.isNumber(threshold))
      return this.dc.throw(
        `threshold: must be a float, ${threshold} was given`
      );
    const result = this.lib.TFSDK__json_identifyTopCandidate(
      this.ptr,
      JSON.stringify(faceprint),
      threshold
    );
    // console.log("result", result);
    return JSON.parse(result);
  }
  /**
   * Obtain the 106 face landmarks.
   *
   * @function getFaceLandmarks
   *
   * @param {buffer} image the pre-processed image in buffer
   * @param {object} faceBoxAndLandmarks FaceBoxAndLandmarks returned by {@link detectFaces()} or {@link detectLargestFace()}.
   * @returns {object} landmarks an array of 106 face landmark points.
   *
   * @returns {object} error code, see ErrorCode.
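   * @example
   * // Sketch: get the 106 landmarks for the largest detected face.
   * // Field names are assumptions about the native JSON output.
   * const det = sdk.detectLargestFace(img);
   * const landmarks = sdk.getFaceLandmarks(img, det.faceBoxAndLandmarks);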
   */
  getFaceLandmarks(image, faceBoxAndLandmarks) {
    if (!Buffer.isBuffer(image.tfimage)) {
      return this.dc.throw(`image: must be a buffer, ${image.tfimage} was given`);
    }
    if (!this.dc.isObject(faceBoxAndLandmarks))
      return this.dc.throw(
        `faceBoxAndLandmarks: must be an object, ${faceBoxAndLandmarks} was given`
      );
    const result = this.lib.TFSDK__json_getFaceLandmarks(
      image.tfimage.deref(),
      this.ptr,
      JSON.stringify(faceBoxAndLandmarks)
    );
    return JSON.parse(result);
  }
  /**
   * Extract the face feature vector for a detected face in the image.
   *
   * @function getFaceFeatureVector
   *
   * @param {buffer} image the pre-processed image in buffer
   * @param {object} faceBoxAndLandmarks FaceBoxAndLandmarks returned by detectFaces() or detectLargestFace().
   * @returns {object} faceprint a Faceprint object which will contain
   *             the face feature vector.
   *
   * @returns {object} error code, see ErrorCode.
   */
  getFaceFeatureVector(image, faceBoxAndLandmarks) {
    if (!Buffer.isBuffer(image.tfimage)) {
      return this.dc.throw(`image: must be a buffer, ${image.tfimage} was given`);
    }
    if (!this.dc.isObject(faceBoxAndLandmarks))
      return this.dc.throw(
        `faceBoxAndLandmarks: must be an object, ${faceBoxAndLandmarks} was given`
      );
    const result = this.lib.TFSDK__json_getFaceFeatureVector(
      image.tfimage.deref(),
      this.ptr,
      JSON.stringify(faceBoxAndLandmarks)
    );
    return JSON.parse(result);
  }
  /**
   *  Align the detected face to be optimized for passing to
   *  feature extraction. If using the face chip with Trueface algorithms (ex face recognition),
   *  do not change the default margin and scale values.
   *
   * @function extractAlignedFace
   *
   * @param {buffer} image the pre-processed image in buffer
   * @param {object} faceBoxAndLandmarks the FaceBoxAndLandmarks returned
   *            by detectLargestFace() or detectFaces().
   * @returns {object} faceImage the aligned face chip data,
   *             112x112x3 = 37632 bytes when the default margins and scale
   *             are used. This wrapper does not expose the margin or scale
   *             parameters, so the native defaults apply.
   *
   * @returns error code, see ErrorCode.
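   * @example
   * // Sketch: extract the 112x112x3 aligned face chip for the largest face.
   * // Field names are assumptions about the native JSON output.
   * const det = sdk.detectLargestFace(img);
   * const chip = sdk.extractAlignedFace(img, det.faceBoxAndLandmarks);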
   */
  extractAlignedFace(image, faceBoxAndLandmarks) {
    if (!Buffer.isBuffer(image.tfimage)) {
      return this.dc.throw(`image: must be a buffer, ${image.tfimage} was given`);
    }
    if (!this.dc.isObject(faceBoxAndLandmarks))
      return this.dc.throw(
        `faceBoxAndLandmarks: must be an object, ${faceBoxAndLandmarks} was given`
      );
    const result = this.lib.TFSDK__extractAlignedFace(
      image.tfimage.deref(),
      this.ptr,
      JSON.stringify(faceBoxAndLandmarks)
    );
    return result;
  }
  /**
   * Detect if there is a presentation attack attempt.
   * Must call checkSpoofImageFaceSize() on both input faces before calling this function.
   *
   * @function detectActiveSpoof
   * @param {object} nearFaceLandmarks The face landmarks of the near face, obtained by calling getFaceLandmarks().
   * @param {object} farFaceLandmarks The face landmarks of the far face, obtained by calling getFaceLandmarks().
   * @returns {number} spoofScore The output spoof score.
   * If the spoof score is above the threshold, then it is classified as a real face.
   * If the spoof score is below the threshold, then it is classified as a fake face.
   * @returns {boolean} spoofPrediction The predicted spoof result, using a spoofScore threshold of 1.05.
   *
   * @returns {object} error code, see ErrorCode.
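   * @example
   * // Sketch of the active-spoof flow: landmarks from a near-capture and a
   * // far-capture image are compared. Variable and field names are assumptions.
   * const nearLm = sdk.getFaceLandmarks(nearImg, nearFace.faceBoxAndLandmarks);
   * const farLm = sdk.getFaceLandmarks(farImg, farFace.faceBoxAndLandmarks);
   * const spoof = sdk.detectActiveSpoof(nearLm, farLm);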
   */
  detectActiveSpoof(nearFaceLandmarks, farFaceLandmarks) {
    if (!this.dc.isObject(nearFaceLandmarks))
      return this.dc.throw(
        `nearFaceLandmarks: must be an object, ${nearFaceLandmarks} was given`
      );
    if (!this.dc.isObject(farFaceLandmarks))
      return this.dc.throw(
        `farFaceLandmarks: must be an object, ${farFaceLandmarks} was given`
      );
    // Call the symbol name declared in the FFI.Library map above.
    const result = this.lib.TFSDK__detectActiveSpoof(
      this.ptr,
      JSON.stringify(nearFaceLandmarks),
      JSON.stringify(farFaceLandmarks)
    );
    return JSON.parse(result);
  }
  /**
   * Ensures that the face size meets the requirements for active spoof.
   * Must check return value of function!
   * Active spoof works by analyzing the way a person's face changes as they move closer to a camera.
   * The active spoof solution therefore expects the face to be at a certain distance from the camera.
   * **In the far image, the face should be about 18 inches from the camera, while in the near image,
   * the face should be 7-8 inches from the camera.**
   * This function must be called before calling detectActiveSpoof().
   *
   * @function checkSpoofImageFaceSize
   *
   * @param {buffer} image the pre-processed image in buffer
   * @param {object} faceBoxAndLandmarks The face on which to run active spoof detection.
   * @param {object} imageProperties The properties of the image (width, height, channels, isGpuImage).
   *
   * @returns {object} error code, see ErrorCode.
   * If `ErrorCode::NO_ERROR` is returned, then the image is eligible for active spoof detection.
   * If `ErrorCode::FACE_TOO_CLOSE` or `ErrorCode::FACE_TOO_FAR` is returned, the image is not eligible for active spoof detection.
   *
   */
  checkSpoofImageFaceSize(image, faceBoxAndLandmarks, imageProperties) {
    if (!Buffer.isBuffer(image.tfimage)) {
      return this.dc.throw(`image: must be a buffer, ${image.tfimage} was given`);
    }
    if (!this.dc.isObject(faceBoxAndLandmarks))
      return this.dc.throw(
        `faceBoxAndLandmarks: must be an object, ${faceBoxAndLandmarks} was given`
      );
    if (!this.dc.isObject(imageProperties))
      return this.dc.throw(
        `imageProperties: must be an object, ${imageProperties} was given`
      );
    // Call the symbol name declared in the FFI.Library map above.
    const result = this.lib.TFSDK__checkSpoofImageFaceSize(
      image.tfimage.deref(),
      this.ptr,
      JSON.stringify(faceBoxAndLandmarks),
      JSON.stringify(imageProperties)
    );
    return JSON.parse(result);
  }
  /**
   * Detect whether the face in the image is wearing a mask or not
   *
   * @function detectMask
   *
   * @param {buffer} image the pre-processed image in buffer
   * @param {object} faceBoxAndLandmarks FaceBoxAndLandmarks returned by detectFaces() or detectLargestFace().
   * @returns {object} result The predicted MaskLabel for the face image.
   *
   * @returns {object} error code, see ErrorCode.
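   * @example
   * // Sketch: run mask detection on the largest detected face.
   * // Field names are assumptions about the native JSON output.
   * const det = sdk.detectLargestFace(img);
   * const mask = sdk.detectMask(img, det.faceBoxAndLandmarks);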
   */
  detectMask(image, faceBoxAndLandmarks) {
    if (!Buffer.isBuffer(image.tfimage)) {
      return this.dc.throw(`image: must be a buffer, ${image.tfimage} was given`);
    }
    if (!this.dc.isObject(faceBoxAndLandmarks))
      return this.dc.throw(
        `faceBoxAndLandmarks: must be an object, ${faceBoxAndLandmarks} was given`
      );
    const result = this.lib.TFSDK__json_detectMask(
      image.tfimage.deref(),
      this.ptr,
      JSON.stringify(faceBoxAndLandmarks)
    );
    return JSON.parse(result);
  }
  /**
   * Detect whether the face in the image is wearing any type of eye glasses or not
   *
   * @function detectGlasses
   *
   * @param {buffer} image the pre-processed image in buffer
   * @param {object} faceBoxAndLandmarks FaceBoxAndLandmarks returned by detectFaces() or detectLargestFace().
   * @returns {object} result The predicted GlassesLabel for the face image.
   * @returns {number} glassesScore The glasses score for this image. This can be used for setting custom thresholds that work
   * better for the use case. By default, a glasses score greater than 0.0 indicates that glasses were detected.
   *
   * @returns {object} error code, see ErrorCode.
   */
  detectGlasses(image, faceBoxAndLandmarks) {
    if (!Buffer.isBuffer(image.tfimage)) {
      return this.dc.throw(`image: must be a buffer, ${image.tfimage} was given`);
    }
    if (!this.dc.isObject(faceBoxAndLandmarks))
      return this.dc.throw(
        `faceBoxAndLandmarks: must be an object, ${faceBoxAndLandmarks} was given`
      );
    const result = this.lib.TFSDK__json_detectGlasses(
      image.tfimage.deref(),
      this.ptr,
      JSON.stringify(faceBoxAndLandmarks)
    );
    return JSON.parse(result);
  }
  /**
   * Destroy the current SDK instance and free its native resources.
   */
  destory() {
    this.lib.TFSDK__destory(this.ptr);
  }
};