// Source: tfsdk.js

"use strict";

const FFI = require("ffi-napi");
const Ref = require("ref-napi");
const ArrayType = require("ref-array-di")(Ref);
const StructType = require("ref-struct-di")(Ref);

class TFSDKDataChecker {
  constructor() {
    this.exception = false;
    this.consoleError = false;
  }
  throw(message) {
    if (this.exception) throw new Error(message);
    if (this.consoleError) console.error(message);
    return 6; // corresponds to ErrorCode "FAILED"
  }
  isString(value) {
    return typeof value === "string" || value instanceof String;
  }
  isNumber(value) {
    return typeof value === "number" && isFinite(value);
  }
  isArray(value) {
    return value && typeof value === "object" && value.constructor === Array;
  }
  isObject(value) {
    return value && typeof value === "object" && value.constructor === Object;
  }
  isNull(value) {
    return value === null;
  }
  isUndefined(value) {
    return typeof value === "undefined";
  }
  isBoolean(value) {
    return typeof value === "boolean";
  }
}

module.exports = class TFSDK {
  constructor(options = {}, libPath = "./artifacts/") {
    this.dc = new TFSDKDataChecker();

    this.int = Ref.types.int;
    this.IntArray = ArrayType(this.int);

    const libType = "void";
    const libPtr = Ref.refType(libType);

    this.licensed = false;

    this.imageProperties = StructType({
      width: Ref.types.uint,
      height: Ref.types.uint,
      channels: Ref.types.uint,
      isGpuImage: Ref.types.bool,
    });
    const ImagePropertiesPtr = Ref.refType(this.imageProperties);

    this.lib = FFI.Library(`${libPath}/libtfsdk`, {
      TFSDK__create: [libPtr, []],
      TFSDK__json_createWithConfigurationOptions: [libPtr, ["string"]],
      TFSDK__destory: ["void", [libPtr]],

      TFSDK__setLicense: ["bool", [libPtr, "string"]],
      TFSDK__isLicensed: ["bool", [libPtr]],
      TFSDK__getVersion: ["double", [libPtr]],

      TFSDK__setImage: ["int", [libPtr, this.IntArray, "int", "int", "int"]],
      TFSDK__setImageFromFile: ["int", [libPtr, "string"]],
      TFSDK__estimateFaceImageQuality: [this.IntArray, [libPtr, "double"]],

      TFSDK__json_detectObjects: ["string", [libPtr]],
      TFSDK__json_detectLargestFace: ["string", [libPtr]],
      TFSDK__json_detectFaces: ["string", [libPtr]],
      TFSDK__json_getLargestFaceFeatureVector: ["string", [libPtr]],
      TFSDK__json_getSimilarity: ["string", [libPtr, "string", "string"]],
      TFSDK__json_estimateHeadOrientation: ["string", [libPtr, "string"]],
      // TFSDK__json_detectBlink: ["string", [libPtr]],
      TFSDK__json_getFaceLandmarks: ["string", [libPtr, "string"]],
      TFSDK__createDatabaseConnection: ["int", [libPtr, "string"]],
      TFSDK__createLoadCollection: ["int", [libPtr, "string"]],
      TFSDK__json_enrollFaceprint: ["string", [libPtr, "string", "string"]],
      TFSDK__removeByUUID: ["int", [libPtr, "string"]],
      TFSDK__removeByIdentity: ["int", [libPtr, "string"]],
      TFSDK__json_identifyTopCandidate: [
        "string",
        [libPtr, "string", Ref.types.float],
      ],
      TFSDK__getImageProperties: ["string", [libPtr, ImagePropertiesPtr]],
      TFSDK__getFaceLandmarks: ["string", [libPtr, "string"]],

      TFSDK__json_getFaceFeatureVector: ["string", [libPtr, "string"]],
      TFSDK__detectActiveSpoof: ["string", [libPtr, "string", "string"]],
      TFSDK__checkSpoofImageFaceSize: ["string", [libPtr, "string", "string"]],
      TFSDK__json_detectMask: ["string", [libPtr, "string"]],
      TFSDK__json_detectGlasses: ["string", [libPtr, "string"]],
      TFSDK__extractAlignedFace: [this.IntArray, [libPtr, "string"]],
    });

    this.ptr = Ref.alloc(libPtr);

    if (options && Object.keys(options).length === 0) {
      this.ptr = this.lib.TFSDK__create();
    } else {
      this.ptr = this.lib.TFSDK__json_createWithConfigurationOptions(
        JSON.stringify(options)
      );
    }
  }
  getColorSpace(bitmap) {
    if (!this.dc.isObject(bitmap) || !this.dc.isBoolean(bitmap._rgba))
      return this.dc.throw(
        `bitmap: must be an object with a boolean _rgba property, ${bitmap} was given`
      );
    if (bitmap._rgba === true) {
      return 3;
    } else {
      return 0;
    }
  }
  /**
   * Return the text description of the error code
   * @function getError
   * @param {number} error the error code returned by an SDK call
   * @returns {string} the description of the error code
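   *
   * A minimal usage sketch (assumes `sdk` is a constructed TFSDK instance and
   * the image path is a placeholder):
   * @example
   * const code = sdk.setImageFromFile("/path/to/image.jpg");
   * if (code !== 0) {
   *   console.error(`setImageFromFile failed: ${sdk.getError(code)}`);
   * }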
   */
  getError(error) {
    if (!this.dc.isNumber(error))
      return this.dc.throw(`error: must be a number, ${error} was given`);
    const ErrorCode = [
      "NO_ERROR",
      "INVALID_LICENSE",
      "FILE_READ_FAIL",
      "UNSUPPORTED_IMAGE_FORMAT",
      "UNSUPPORTED_MODEL",
      "NO_FACE_IN_FRAME",
      "FAILED",
      "COLLECTION_CREATION_ERROR",
      "DATABASE_CONNECTION_ERROR",
      "ENROLLMENT_ERROR",
    ];
    return ErrorCode[error];
  }

  /**
   * Sets and validates the given license token. Need to call this method
   * before being able to use the SDK.
   * @function setLicense
   *
   * @param {string} license the license token (if you do not have one, contact
   *        support@trueface.ai).
   * @returns {boolean} Whether the given license token is valid.
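   *
   * A minimal getting-started sketch (the module path and the environment
   * variable holding the token are illustrative assumptions):
   * @example
   * const TFSDK = require("./tfsdk");
   * const sdk = new TFSDK();
   * if (!sdk.setLicense(process.env.TRUEFACE_TOKEN)) {
   *   throw new Error("Invalid Trueface license token");
   * }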
   */
  setLicense(license) {
    if (!this.dc.isString(license))
      return this.dc.throw(`license: must be a string, ${license} was given`);
    const res = this.lib.TFSDK__setLicense(this.ptr, license);
    this.licensed = res;
    return res;
  }
  /**
   * Checks whether the given license token is valid and you can use
   * the SDK.
   *
   * @function isLicensed
   * @returns {boolean} Whether the given license token is valid.
   */
  isLicensed() {
    return this.licensed;
  }
  /**
   * Gets the version-build number of the SDK.
   * @function getVersion
   *
   * @returns {number} the version-build number.
   */
  getVersion() {
    return this.lib.TFSDK__getVersion(this.ptr);
  }
  /**
   * Set the image that is processed by the other methods.
   * @function setImage
   *
   * @param {Array} data an 8-bit decoded image as a flat array of pixel values, in CPU or GPU memory.
   * @param {number} width the image width.
   * @param {number} height the image height.
   * @param {number} color the image color model, see ColorCode (and getColorSpace()).
   *
   * @returns {number} error code, see {@link getError}.
   * Note: it is highly encouraged to check the return value of setImage() before proceeding.
   * If the license is invalid, the INVALID_LICENSE error will be returned.
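   *
   * A minimal sketch (assumes `sdk` is a licensed TFSDK instance and `bitmap`
   * is a decoded image exposing { data, width, height, _rgba }; the decoder
   * used to produce it is up to the caller):
   * @example
   * const color = sdk.getColorSpace(bitmap); // 3 for RGBA, 0 otherwise
   * const code = sdk.setImage(Array.from(bitmap.data), bitmap.width, bitmap.height, color);
   * if (code !== 0) console.error(sdk.getError(code));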
   */
  setImage(data, width, height, color) {
    if (!this.dc.isArray(data))
      return this.dc.throw(`data: must be an array, ${data} was given`);
    if (!this.dc.isNumber(width))
      return this.dc.throw(`width: must be a number, ${width} was given`);
    if (!this.dc.isNumber(height))
      return this.dc.throw(`height: must be a number, ${height} was given`);
    if (!this.dc.isNumber(color))
      return this.dc.throw(`color: must be a number, ${color} was given`);

    const arr = new this.IntArray(data.length);
    for (const [i, v] of data.entries()) arr[i] = v;
    return this.lib.TFSDK__setImage(this.ptr, arr, width, height, color);
  }
  /**
   * Set the image from a image file
   * @function setImageFromFile
   *
   * @param {string} path the path to the image file.
   * @returns {number} error code, see ErrorCode.
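   *
   * A minimal sketch (assumes `sdk` is a licensed TFSDK instance; the path is
   * a placeholder):
   * @example
   * const code = sdk.setImageFromFile("./images/face.jpg");
   * if (code !== 0) throw new Error(sdk.getError(code));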
   */
  setImageFromFile(path) {
    if (!this.dc.isString(path))
      return this.dc.throw(`path: must be a string, ${path} was given`);
    return this.lib.TFSDK__setImageFromFile(this.ptr, path);
  }
  /**
   * Detect and identify all the objects in the image.
   * @function detectObjects
   * @returns {object} an array of object descriptions and their locations in the image.
   *
   * @returns {object} error code, see ErrorCode.
   */
  detectObjects() {
    const result = this.lib.TFSDK__json_detectObjects(this.ptr);
    return JSON.parse(result);
  }
  /**
   * Detect all the faces in the image. This method has a small false positive rate.
   * To reduce the false positive rate to near zero, filter out faces with score lower than 0.90.
   * Alternatively, you can use the `Trueface::FaceDetectionFilter`
   * configuration option to filter the detected faces.
   *
   * The face detector has a detection scale range of about 5 octaves. ConfigurationOptions.smallestFaceHeight
   * determines the lower bound of the detection scale range. E.g., setting ConfigurationOptions.smallestFaceHeight to
   * 40 pixels yields a detection scale range of ~40 pixels to 1280 (=40x2^5) pixels.
   *
   * @function detectFaces
   * @returns {object} faceBoxAndLandmarks an array of FaceBoxAndLandmarks representing each of the detected faces.
   * If no faces are found, the array will be empty. The detected faces are sorted in order of descending face score.
   *
   * @returns {object} error code, see ErrorCode.
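   *
   * A minimal sketch (assumes an image has already been set via setImage() or
   * setImageFromFile(); the exact shape of the parsed JSON is not specified here):
   * @example
   * const res = sdk.detectFaces();
   * // Inspect the parsed result (detected faces plus an error code) before use.
   * console.log(JSON.stringify(res, null, 2));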
   */
  detectFaces() {
    const result = this.lib.TFSDK__json_detectFaces(this.ptr);
    return JSON.parse(result);
  }
  /**
   * Detect the largest face in the image.
   * This method has a small false positive rate.
   * To reduce the false positive rate to near zero, filter out faces with score lower than 0.90.
   * Alternatively, you can use the `Trueface::FaceDetectionFilter`
   * configuration option to filter the detected faces.
   * See detectFaces() for the detection scale range.
   *
   * @function detectLargestFace
   * @returns {object} faceBoxAndLandmarks the FaceBoxAndLandmarks containing the landmarks and bounding box
   * of the largest detected face in the image.
   * @returns {boolean} found whether a face was found in the image.
   *
   * @returns {object} error code, see ErrorCode.
   */
  detectLargestFace() {
    const result = this.lib.TFSDK__json_detectLargestFace(this.ptr);
    return JSON.parse(result);
  }
  /**
   * Detect the largest face in the image and return its feature
   * vector.
   *
   * @function getLargestFaceFeatureVector
   * @returns {object} Faceprint object which will contain
   *             the face feature vector.
   * @returns {boolean} foundFace indicates if a face was detected in the image.
   * If no face was detected, then the faceprint will be empty.
   *
   * @returns {object} error code, see {@link getError}.
   */
  getLargestFaceFeatureVector() {
    const result = this.lib.TFSDK__json_getLargestFaceFeatureVector(this.ptr);
    return JSON.parse(result);
  }
  /**
   * Compute the similarity between two feature vectors, or how
   * similar two faces are.
   *
   * @function getSimilarity
   * @param {object} faceprint1 the first Faceprint to be compared.
   * @param {object} faceprint2 the second Faceprint to be compared.
   * @returns {number} matchProbability the probability the two face feature vectors are a match.
   * @returns {number} similarityMeasure the computed similarity measure.
   *
   * @returns {object} error code, see ErrorCode.
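   *
   * A minimal 1:1 comparison sketch (assumes `sdk` is licensed and each image
   * contains a face; the `faceprint` field name on the parsed result is an assumption):
   * @example
   * sdk.setImageFromFile("./images/person_a.jpg");
   * const a = sdk.getLargestFaceFeatureVector();
   * sdk.setImageFromFile("./images/person_b.jpg");
   * const b = sdk.getLargestFaceFeatureVector();
   * const sim = sdk.getSimilarity(a.faceprint, b.faceprint);
   * console.log(sim); // expected to contain matchProbability and similarityMeasure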
   */
  getSimilarity(faceprint1, faceprint2) {
    if (!this.dc.isObject(faceprint1))
      return this.dc.throw(
        `faceprint1: must be an object, ${faceprint1} was given`
      );
    if (!this.dc.isObject(faceprint2))
      return this.dc.throw(
        `faceprint2: must be an object, ${faceprint2} was given`
      );
    const result = this.lib.TFSDK__json_getSimilarity(
      this.ptr,
      JSON.stringify(faceprint1),
      JSON.stringify(faceprint2)
    );
    return JSON.parse(result);
  }
  /**
   * Estimate the head pose.
   *
   * @function estimateHeadOrientation
   * @param {object} faceBoxAndLandmarks FaceBoxAndLandmarks returned by detectFaces() or detectLargestFace().
   *
   * @returns {object} <b>yaw</b> the rotation angle around the image's vertical axis, in radians;
   * <b>pitch</b> the rotation angle around the image's transverse axis, in radians;
   * <b>roll</b> the rotation angle around the image's longitudinal axis, in radians.
   * @returns {object} error code, see ErrorCode.
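   *
   * A minimal sketch (assumes an image has been set; the `faceBoxAndLandmarks`
   * field name on the detection result is an assumption):
   * @example
   * const det = sdk.detectLargestFace();
   * const pose = sdk.estimateHeadOrientation(det.faceBoxAndLandmarks);
   * console.log(pose); // expected to contain yaw, pitch and roll in radians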
   */
  estimateHeadOrientation(face) {
    if (!this.dc.isObject(face))
      return this.dc.throw(`face: must be an object, ${face} was given`);
    const result = this.lib.TFSDK__json_estimateHeadOrientation(
      this.ptr,
      JSON.stringify(face)
    );
    return JSON.parse(result);
  }
  /*
  detectBlink() {
    const result = this.lib.TFSDK__json_detectBlink(this.ptr);
    return JSON.parse(result);
  }
  */
  /**
   *
   * Create a connection to a new or existing database.
   * If the database does not exist, a new one will be created with the provided name.
   * If the `Trueface::DatabaseManagementSystem::NONE` memory only
   * configuration option is selected, this function does not need to be called (and is a harmless no-op).
   *
   *
   * If `Trueface::DatabaseManagementSystem::SQLITE` is selected, this should be the filepath to the database.
   * ex. "/myPath/myDatabase.db".
   * If `Trueface::DatabaseManagementSystem::POSTGRESQL`
   * is selected, this should be a database connection string.
   * <a href="https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-PARAMKEYWORDS">Here</a>
   * is a list of all supported PostgreSQL connection parameters.
   * @function createDatabaseConnection
   * @example
   * "hostaddr=192.168.1.0 port=5432 dbname=face_recognition user=postgres password=my_password"
   * @example
   * "host=localhost port=5432 dbname=face_recognition user=postgres password=m_password"
   * To enable ssl, add "sslmode=require" to the connection string.
   *
   * @param {string} databaseConnectionString
   *
   * @returns {object} error code, see ErrorCode.
   */
  createDatabaseConnection(databaseConnectionString) {
    if (!this.dc.isString(databaseConnectionString))
      return this.dc.throw(
        `databaseConnectionString: must be a string, ${databaseConnectionString} was given`
      );
    return this.lib.TFSDK__createDatabaseConnection(
      this.ptr,
      databaseConnectionString
    );
  }
  /**
   * Create a new collection, or load data from an existing collection into memory (RAM) if one with the
   * provided name already exists in the database.
   * Equivalent to calling createCollection() then loadCollection().
   *
   * @function createLoadCollection
   * @param {string} collectionName the name of the collection.
   *
   * @returns {object} error code, see ErrorCode.
   */
  createLoadCollection(collectionName) {
    if (!this.dc.isString(collectionName))
      return this.dc.throw(
        `collectionName: must be a string, ${collectionName} was given`
      );
    return this.lib.TFSDK__createLoadCollection(this.ptr, collectionName);
  }
  /**
   * Enroll a Faceprint for a new or existing identity in the collection.
   *
   * @function enrollFaceprint
   * @param {object} faceprint the Faceprint to enroll in the collection.
   * @param {string} identity the identity corresponding to the Faceprint.
   * @returns {string} UUID universally unique identifier corresponding to the Faceprint.
   *
   * @returns {object} error code, see ErrorCode.
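   *
   * A minimal enrollment sketch (assumes `sdk` is licensed and configured for an
   * SQLite database; paths, names and the `faceprint` field name are assumptions):
   * @example
   * sdk.createDatabaseConnection("./my_database.db");
   * sdk.createLoadCollection("my_collection");
   * sdk.setImageFromFile("./images/alice.jpg");
   * const fv = sdk.getLargestFaceFeatureVector();
   * const res = sdk.enrollFaceprint(fv.faceprint, "alice");
   * console.log(res); // expected to contain the generated UUID and an error code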
   */
  enrollFaceprint(faceprint, identity) {
    if (!this.dc.isObject(faceprint))
      return this.dc.throw(
        `faceprint: must be an object, ${faceprint} was given`
      );
    if (!this.dc.isString(identity))
      return this.dc.throw(`identity: must be a string, ${identity} was given`);
    const result = this.lib.TFSDK__json_enrollFaceprint(
      this.ptr,
      JSON.stringify(faceprint),
      identity
    );
    return JSON.parse(result);
  }
  /**
   * Remove a Faceprint from the collection using the UUID.
   *
   * @function removeByUUID
   * @param {string} UUID the universally unique identifier returned by enrollFaceprint().
   *
   * @returns {object} error code, see ErrorCode.
   */
  removeByUUID(UUID) {
    if (!this.dc.isString(UUID))
      return this.dc.throw(`UUID: must be a string, ${UUID} was given`);
    return this.lib.TFSDK__removeByUUID(this.ptr, UUID);
  }
  /**
   * Remove all Faceprints in the collection corresponding to the identity.
   *
   * @function removeByIdentity
   * @param {string} identity the identity to remove from the collection.
   * @returns {number} numFaceprintsRemoved the number of Faceprints that were removed for that identity.
   *
   * @returns {object} error code, see ErrorCode.
   */
  removeByIdentity(identity) {
    if (!this.dc.isString(identity))
      return this.dc.throw(`identity: must be a string, ${identity} was given`);
    return this.lib.TFSDK__removeByIdentity(this.ptr, identity);
  }
  /**
   * Get the top match Candidate in the collection and the corresponding similarity score and match probability.
   *
   * @function identifyTopCandidate
   * @param {object} faceprint the Faceprint to be identified.
   * @param {number} threshold the similarity score threshold above which it is considered a match.
   * Higher thresholds may result in faster queries.
   * Refer to our <a href="https://docs.trueface.ai/ROC-Curves-d47d2730cf0a44afacb39aae0ed1b45a">ROC curves</a> when selecting a threshold.
   *
   * @returns {string} candidate the top match Candidate.
   * @returns {boolean} found set to true if a match is found.
   *
   * @returns {object} error code, see ErrorCode.
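   *
   * A minimal identification sketch (assumes a collection has already been
   * created and populated; the `faceprint` field name is an assumption):
   * @example
   * sdk.setImageFromFile("./images/probe.jpg");
   * const probe = sdk.getLargestFaceFeatureVector();
   * const match = sdk.identifyTopCandidate(probe.faceprint, 0.3);
   * console.log(match); // expected to contain the top candidate and a found flag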
   */
  identifyTopCandidate(faceprint, threshold = 0.3) {
    if (!this.dc.isObject(faceprint))
      return this.dc.throw(
        `faceprint: must be an object, ${faceprint} was given`
      );
    if (!this.dc.isNumber(threshold))
      return this.dc.throw(
        `threshold: must be a float, ${threshold} was given`
      );
    const result = this.lib.TFSDK__json_identifyTopCandidate(
      this.ptr,
      JSON.stringify(faceprint),
      threshold
    );
    return JSON.parse(result);
  }
  /**
   * Get properties of the image set by setImage().
   *
   * @function getImageProperties
   * @returns {object} imageProperties an object with width, height, channels and isGpuImage.
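   *
   * A minimal sketch (assumes an image has already been set):
   * @example
   * const props = sdk.getImageProperties();
   * console.log(props.width, props.height, props.channels, props.isGpuImage);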
   */
  getImageProperties() {
    /*
    Mirrors the native struct:
    struct ImageProperties {
        unsigned int width = 0;
        unsigned int height = 0;
        unsigned int channels = 0;
        bool isGpuImage = false;
    };
    */
    const ip = new this.imageProperties({
      width: 0,
      height: 0,
      channels: 0,
      isGpuImage: false,
    });
    this.lib.TFSDK__getImageProperties(this.ptr, ip.ref());
    // Return a plain object so the result can be safely JSON-stringified,
    // e.g. when passed to checkSpoofImageFaceSize().
    return {
      width: ip.width,
      height: ip.height,
      channels: ip.channels,
      isGpuImage: ip.isGpuImage,
    };
  }
  /**
   * Obtain the 106 face landmarks.
   *
   * @function getFaceLandmarks
   * @param {object} faceBoxAndLandmarks FaceBoxAndLandmarks returned by {@link detectFaces()} or {@link detectLargestFace()}.
   * @returns {object} landmarks an array of 106 face landmark points.
   *
   * @returns {object} error code, see ErrorCode.
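   *
   * A minimal sketch (assumes an image has been set; the `faceBoxAndLandmarks`
   * field name on the detection result is an assumption):
   * @example
   * const det = sdk.detectLargestFace();
   * const lm = sdk.getFaceLandmarks(det.faceBoxAndLandmarks);
   * console.log(lm); // expected to contain the 106 landmark points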
   */
  getFaceLandmarks(faceBoxAndLandmarks) {
    if (!this.dc.isObject(faceBoxAndLandmarks))
      return this.dc.throw(
        `faceBoxAndLandmarks: must be an object, ${faceBoxAndLandmarks} was given`
      );
    const result = this.lib.TFSDK__json_getFaceLandmarks(
      this.ptr,
      JSON.stringify(faceBoxAndLandmarks)
    );
    return JSON.parse(result);
  }
  /**
   * Extract the face feature vector for a detected face.
   *
   * @function getFaceFeatureVector
   * @param {object} faceBoxAndLandmarks FaceBoxAndLandmarks returned by detectFaces() or detectLargestFace().
   * @returns {object} faceprint a Faceprint object which will contain
   *             the face feature vector.
   *
   * @returns {object} error code, see ErrorCode.
   */
  getFaceFeatureVector(faceBoxAndLandmarks) {
    if (!this.dc.isObject(faceBoxAndLandmarks))
      return this.dc.throw(
        `faceBoxAndLandmarks: must be an object, ${faceBoxAndLandmarks} was given`
      );
    const result = this.lib.TFSDK__json_getFaceFeatureVector(
      this.ptr,
      JSON.stringify(faceBoxAndLandmarks)
    );
    return JSON.parse(result);
  }
  /**
   *  Align the detected face to be optimized for passing to
   *  feature extraction. If using the face chip with Trueface algorithms (ex face recognition),
   *  do not change the default margin and scale values.
   *
   * @function extractAlignedFace
   * @param {object} faceBoxAndLandmarks the FaceBoxAndLandmarks returned
   *            by detectLargestFace() or detectFaces().
   * @returns {object} faceImage a buffer of
   *             112x112x3 = 37632 bytes (when using the default margins and scale)
   *             containing the aligned face image.
   *             If using a non-default margin and scale
   *             (again, non-standard face chip sizes will not work with Trueface algorithms),
   *             the faceImage will be of size:
   *             width = int((112+marginLeft+marginRight)*scale),
   *             height = int((112+marginTop+marginBottom)*scale),
   *             and therefore the buffer size is width * height * 3.
   *             Note: this wrapper does not expose the margin and scale parameters,
   *             so the default values are always used.
   *
   * @returns {object} error code, see ErrorCode.
   */
  extractAlignedFace(faceBoxAndLandmarks) {
    if (!this.dc.isObject(faceBoxAndLandmarks))
      return this.dc.throw(
        `faceBoxAndLandmarks: must be an object, ${faceBoxAndLandmarks} was given`
      );
    const result = this.lib.TFSDK__extractAlignedFace(
      this.ptr,
      JSON.stringify(faceBoxAndLandmarks)
    );
    return result;
  }
  /**
   * Estimate the quality of the face image for recognition.
   *
   * @function estimateFaceImageQuality
   * @param {array} alignedFaceImage The array returned by extractAlignedFace().
   * @returns {number} quality a value between 0 and 1, 1 being perfect quality for recognition.
   * We suggest using a threshold of 0.999 as a filter for enrollment images.
   *
   * @returns {object} error code, see ErrorCode.
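   *
   * A minimal sketch combining extractAlignedFace() with this method (assumes an
   * image has been set; the `faceBoxAndLandmarks` field name and treating the
   * extracted face chip as an array-like are assumptions):
   * @example
   * const det = sdk.detectLargestFace();
   * const chip = sdk.extractAlignedFace(det.faceBoxAndLandmarks);
   * const quality = sdk.estimateFaceImageQuality(Array.from(chip));
   * console.log(quality);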
   */
  estimateFaceImageQuality(alignedFaceImage) {
    if (!this.dc.isArray(alignedFaceImage))
      return this.dc.throw(
        `alignedFaceImage: must be an array, ${alignedFaceImage} was given`
      );
    const arr = new this.IntArray(alignedFaceImage.length);
    for (const [i, v] of alignedFaceImage.entries()) arr[i] = v;
    return this.lib.TFSDK__estimateFaceImageQuality(this.ptr, arr);
  }
  /**
   * Detect if there is a presentation attack attempt.
   * Must call checkSpoofImageFaceSize() on both input faces before calling this function.
   *
   * @function detectActiveSpoof
   * @param {object} nearFaceLandmarks The face landmarks of the near face, obtained by calling getFaceLandmarks().
   * @param {object} farFaceLandmarks The face landmarks of the far face, obtained by calling getFaceLandmarks().
   * @returns {number} spoofScore The output spoof score.
   * If the spoof score is above the threshold, then it is classified as a real face.
   * If the spoof score is below the threshold, then it is classified as a fake face.
   * @returns {boolean} spoofPrediction The predicted spoof result, using a spoofScore threshold of 1.05.
   *
   * @returns {object} error code, see ErrorCode.
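   *
   * A minimal active spoof sketch (assumes two captures of the same face, one near
   * and one far; field names on the parsed results are assumptions):
   * @example
   * // Near image (face ~7-8 inches from the camera).
   * sdk.setImageFromFile("./images/near.jpg");
   * const nearDet = sdk.detectLargestFace();
   * sdk.checkSpoofImageFaceSize(nearDet.faceBoxAndLandmarks, sdk.getImageProperties());
   * const nearLm = sdk.getFaceLandmarks(nearDet.faceBoxAndLandmarks);
   * // Far image (face ~18 inches from the camera).
   * sdk.setImageFromFile("./images/far.jpg");
   * const farDet = sdk.detectLargestFace();
   * sdk.checkSpoofImageFaceSize(farDet.faceBoxAndLandmarks, sdk.getImageProperties());
   * const farLm = sdk.getFaceLandmarks(farDet.faceBoxAndLandmarks);
   * // Check both size-check return values above before trusting this result.
   * const spoof = sdk.detectActiveSpoof(nearLm, farLm);
   * console.log(spoof);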
   */
  detectActiveSpoof(nearFaceLandmarks, farFaceLandmarks) {
    if (!this.dc.isObject(nearFaceLandmarks))
      return this.dc.throw(
        `nearFaceLandmarks: must be an object, ${nearFaceLandmarks} was given`
      );
    if (!this.dc.isObject(farFaceLandmarks))
      return this.dc.throw(
        `farFaceLandmarks: must be an object, ${farFaceLandmarks} was given`
      );
    const result = this.lib.TFSDK__detectActiveSpoof(
      this.ptr,
      JSON.stringify(nearFaceLandmarks),
      JSON.stringify(farFaceLandmarks)
    );
    return JSON.parse(result);
  }
  /**
   * Ensures that the face size meets the requirements for active spoof.
   * Must check return value of function!
   * Active spoof works by analyzing the way a person's face changes as they move closer to a camera.
   * The active spoof solution therefore expects the face to be a certain distance from the camera.
   * **In the far image, the face should be about 18 inches from the camera, while in the near image,
   * the face should be 7-8 inches from the camera.**
   * This function must be called before calling detectActiveSpoof().
   *
   * @function checkSpoofImageFaceSize
   * @param {object} faceBoxAndLandmarks The face on which to run active spoof detection.
   * @param {object} imageProperties The properties of the image, obtained from getImageProperties().
   * Note: the underlying SDK also distinguishes a near-stage and a far-stage image;
   * this wrapper does not expose that parameter.
   *
   * @returns {object} error code, see ErrorCode.
   * If `ErrorCode::NO_ERROR` is returned, then the image is eligible for active spoof detection.
   * If `ErrorCode::FACE_TOO_CLOSE` or `ErrorCode::FACE_TOO_FAR` is returned, the image is not eligible for active spoof detection.
   *
   */
  checkSpoofImageFaceSize(faceBoxAndLandmarks, imageProperties) {
    if (!this.dc.isObject(faceBoxAndLandmarks))
      return this.dc.throw(
        `faceBoxAndLandmarks: must be an object, ${faceBoxAndLandmarks} was given`
      );
    if (!this.dc.isObject(imageProperties))
      return this.dc.throw(
        `imageProperties: must be an object, ${imageProperties} was given`
      );
    const result = this.lib.TFSDK__checkSpoofImageFaceSize(
      this.ptr,
      JSON.stringify(faceBoxAndLandmarks),
      JSON.stringify(imageProperties)
    );
    return JSON.parse(result);
  }
  /**
   * Detect whether the face in the image is wearing a mask or not
   *
   * @function detectMask
   * @param {object} faceBoxAndLandmarks FaceBoxAndLandmarks returned by detectFaces() or detectLargestFace().
   * @returns {string} result the predicted MaskLabel for the face image.
   *
   * @returns {object} error code, see ErrorCode.
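   *
   * A minimal sketch (assumes an image has been set; the `faceBoxAndLandmarks`
   * field name is an assumption):
   * @example
   * const det = sdk.detectLargestFace();
   * const mask = sdk.detectMask(det.faceBoxAndLandmarks);
   * console.log(mask);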
   */
  detectMask(faceBoxAndLandmarks) {
    if (!this.dc.isObject(faceBoxAndLandmarks))
      return this.dc.throw(
        `faceBoxAndLandmarks: must be an object, ${faceBoxAndLandmarks} was given`
      );
    const result = this.lib.TFSDK__json_detectMask(
      this.ptr,
      JSON.stringify(faceBoxAndLandmarks)
    );
    return JSON.parse(result);
  }
  /**
   * Detect whether the face in the image is wearing any type of eye glasses or not
   *
   * @function detectGlasses
   * @param {object} faceBoxAndLandmarks FaceBoxAndLandmarks returned by detectFaces() or detectLargestFace().
   * @returns {string} result the predicted GlassesLabel for the face image.
   * @returns {number} glassesScore the glasses score for this image. This can be used for setting custom thresholds that work
   * better for the use case. By default, we use a glasses score greater than 0.0 to determine that glasses were detected.
   *
   * @returns {object} error code, see ErrorCode.
   */
  detectGlasses(faceBoxAndLandmarks) {
    if (!this.dc.isObject(faceBoxAndLandmarks))
      return this.dc.throw(
        `faceBoxAndLandmarks: must be an object, ${faceBoxAndLandmarks} was given`
      );
    const result = this.lib.TFSDK__json_detectGlasses(
      this.ptr,
      JSON.stringify(faceBoxAndLandmarks)
    );
    return JSON.parse(result);
  }
};