Issue
Background: I'm trying to build a Chrome extension that filters images on the websites a user visits. The process is basically: get the image data, pass it through my trained model (transfer learning from Keras), and output the predicted class. In this example, I want to check whether an image is of a dog or a cat.
Sample target image URL: https://i.natgeofe.com/n/4f5aaece-3300-41a4-b2a8-ed2708a0a27c/domestic-dog_thumb_4x3.jpg
Problem: While I get a prediction of 0.70014906 in Python, my extension outputs only 0.4576922. I suspect it is due to the way images are passed from my contentScript.js to background.js. I have been researching for a few days but still can't get it fixed. I'd appreciate your help.
contentScript.js
const IMAGE_SIZE = 224;
const MIN_IMG_SIZE = 128;

function loadImageAndSendDataBack(src, sendResponse) {
  const img = new Image();
  img.crossOrigin = 'anonymous';
  img.onerror = function(e) {
    console.warn(`Could not load image from external source ${src}.`);
    sendResponse({rawImageData: undefined});
  };
  img.onload = function(e) {
    if ((img.height && img.height > MIN_IMG_SIZE) ||
        (img.width && img.width > MIN_IMG_SIZE)) {
      // Resize to the model's expected input size and grab the raw RGBA pixels.
      img.width = IMAGE_SIZE;
      img.height = IMAGE_SIZE;
      const canvas = new OffscreenCanvas(img.width, img.height);
      const ctx = canvas.getContext('2d');
      ctx.drawImage(img, 0, 0, IMAGE_SIZE, IMAGE_SIZE);
      const imageData = ctx.getImageData(0, 0, img.width, img.height);
      sendResponse({
        rawImageData: Array.from(imageData.data),
        width: img.width,
        height: img.height,
      });
      return;
    }
    // Fail out when neither dimension exceeds MIN_IMG_SIZE.
    console.warn(`Image size too small. [${img.height} x ${img.width}] vs. minimum [${MIN_IMG_SIZE} x ${MIN_IMG_SIZE}]`);
    sendResponse({rawImageData: undefined});
  };
  img.src = src;
}

let changeImg = function() {
  let images = document.getElementsByTagName('img');
  for (let i = 0; i < images.length; i++) {
    let src = images[i].src;
    loadImageAndSendDataBack(src, function(data) {
      chrome.runtime.sendMessage({msg: 'image', index: i, data: data}, () => {});
    });
  }
};
changeImg();
background.js
import * as tf from '@tensorflow/tfjs';

const loadModel = async () => {
  console.log('Loading model...');
  const startTime = performance.now();
  try {
    let json = chrome.runtime.getURL('model/model.json');
    chrome.storage.local.set({'model': json});
    const model = await tf.loadLayersModel('model/model.json');
    const totalTime = Math.floor(performance.now() - startTime);
    console.log(`Model loaded and initialized in ${totalTime} ms...`);
    chrome.runtime.onMessage.addListener((message, sender, senderResponse) => {
      console.log(message.msg, message.index, message.data);
      // Rebuild the ImageData from the serialized pixel array sent by the content script.
      let imageData = new ImageData(
          Uint8ClampedArray.from(message.data.rawImageData),
          message.data.width, message.data.height);
      imageData = tf.browser.fromPixels(imageData);
      imageData = tf.expandDims(imageData);
      let prediction = model.predict(imageData);
      prediction.print();
    });
  } catch (e) {
    console.error('Unable to load model', e);
  }
};
loadModel();
manifest.json
{
  "name": "Filter JY",
  "version": "0.1",
  "description": "filter",
  "manifest_version": 3,
  "content_scripts": [{
    "matches": ["<all_urls>"],
    "run_at": "document_end",
    "all_frames": true,
    "js": ["src/contentScript.js"]
  }],
  "permissions": [
    "activeTab",
    "tabs",
    "storage",
    "scripting",
    "webNavigation",
    "webRequest"
  ],
  "host_permissions": ["<all_urls>"],
  "background": {
    "service_worker": "src/background.js"
  },
  "icons": {
    "16": "images/icon16.png",
    "32": "images/icon32.png",
    "48": "images/icon48.png",
    "128": "images/icon128.png"
  },
  "action": {
    "default_icon": {
      "16": "images/icon16.png",
      "32": "images/icon32.png",
      "48": "images/icon48.png",
      "128": "images/icon128.png"
    },
    "default_title": "Filter",
    "default_popup": "popup.html"
  },
  "web_accessible_resources": [
    {
      "resources": ["src/model/model.json", "src/model/group1-shard1of3.bin", "src/model/group1-shard2of3.bin", "src/model/group1-shard3of3.bin"],
      "matches": ["<all_urls>"]
    }
  ],
  "content_security_policy": {
    "extension_pages": "script-src 'self'; object-src 'self'"
  }
}
Solution
I realised my main issue is that I skipped the preprocess_input step from my Python code.
Original preprocessing code in Python
from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
test_image = preprocess_input(test_image)
Take note that preprocess_input for MobileNetV2 scales pixel values from [0, 255] to [-1, 1] (i.e. x / 127.5 - 1). Hence, we have to perform the same scaling ourselves on the JavaScript side.
Preprocessing code in JS
imageData = tf.browser.fromPixels(imageData);
let offset = tf.scalar(127.50);
imageData = imageData.sub(offset).div(offset);
This should ensure that the model sees the same input data in both environments.
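For completeness, here is a minimal sketch of how the prediction step in background.js might look with this scaling applied, assuming the listener sits inside loadModel where model is in scope, as in the question. The toFloat() call and tf.tidy wrapper are additions of mine (tidy just disposes the intermediate tensors), not part of the original code.

// Sketch: predict with MobileNetV2-style scaling applied.
chrome.runtime.onMessage.addListener((message, sender, senderResponse) => {
  const {rawImageData, width, height} = message.data;
  const imageData = new ImageData(
      Uint8ClampedArray.from(rawImageData), width, height);
  const prediction = tf.tidy(() => {
    const offset = tf.scalar(127.5);
    const input = tf.browser.fromPixels(imageData)
        .toFloat()
        .sub(offset)    // [0, 255] -> [-127.5, 127.5]
        .div(offset)    // -> [-1, 1], matching preprocess_input
        .expandDims();  // add batch dimension: [1, 224, 224, 3]
    return model.predict(input);
  });
  prediction.print();
});

With this in place, the extension's output for the sample image should match the Python prediction, since the only remaining differences are in image decoding and resizing.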
Answered By - John Jam