Sure. This is the code for the Offscreen API.
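For reference, chrome.offscreen requires the "offscreen" permission in the manifest; a minimal excerpt (the rest of your manifest is assumed unchanged):

manifest.json
{
  "manifest_version": 3,
  "permissions": ["offscreen"],
  "background": { "service_worker": "sw.js" }
}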
sw.js
let creating; // A global promise to avoid concurrency issues

async function setupOffscreenDocument(path) {
  // Check all windows controlled by the service worker to see if one
  // of them is the offscreen document with the given path
  const offscreenUrl = chrome.runtime.getURL(path);
  const existingContexts = await chrome.runtime.getContexts({
    contextTypes: ['OFFSCREEN_DOCUMENT'],
    documentUrls: [offscreenUrl]
  });

  if (existingContexts.length > 0) {
    return;
  }

  // Create the offscreen document, or wait for a creation already in flight
  if (creating) {
    await creating;
  } else {
    creating = chrome.offscreen.createDocument({
      url: path,
      reasons: ['CLIPBOARD'],
      justification: 'testing whether offscreen API would support jQuery in order to execute getSignLanguagesWithVideos() function',
    });
    await creating;
    creating = null;
  }
}
async function getSignLanguagesWithVideos() {
  await setupOffscreenDocument('./offscreen.html');

  // Send message to offscreen document
  const signLanguages = await chrome.runtime.sendMessage({
    type: 'getSignLanguagesWithVideos',
    target: 'offscreen',
    data: { sparqlEndpointLinguaLibreURL: sparqlEndpoints.lingualibre.url, sparqlSignLanguagesQuery },
  });

  // Close the offscreen document so that only one offscreen page is open at a time
  await chrome.offscreen.closeDocument();
  return signLanguages;
}
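In the service worker this is then just an ordinary async call; for example (a hypothetical trigger, your actual entry point may differ):

chrome.runtime.onInstalled.addListener(async () => {
  const signLanguages = await getSignLanguagesWithVideos();
  console.log(signLanguages);
});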
The query looks something like this:

sparqlSignLanguagesQuery: 'SELECT ?id ?idLabel WHERE { ?id prop:P2 entity:Q4 . ?id prop:P24 entity:Q88890 . SERVICE wikibase:label { bd:serviceParam wikibase:language "fr,en". } }'

And the endpoint looks something like this:

sparqlEndpointLinguaLibreURL: 'https://lingualibre.org/bigdata/namespace/wdq/sparql'
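The endpoint answers in the standard SPARQL JSON results format, which is what the parsing loops below walk over (illustrative values only):

{
  "results": {
    "bindings": [
      {
        "id": { "type": "uri", "value": "https://lingualibre.org/entity/Q99628" },
        "idLabel": { "type": "literal", "value": "langue des signes française" }
      }
    ]
  }
}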
offscreen.js
chrome.runtime.onMessage.addListener(handleMessages);
async function getSignLanguagesWithVideos(endpointURL, query) {
  var i,
    signLanguage,
    signLanguages = [], // ?? already defined in global scope
    response = await $.post(endpointURL, {
      format: "json",
      query,
    });

  // create signLanguages objects
  for (i = 0; i < response.results.bindings.length; i++) {
    var signLanguageRaw = response.results.bindings[i];
    console.log("#149", signLanguageRaw);
    signLanguage = {
      wdQid: signLanguageRaw.id.value.split("/").pop(),
      labelNative: signLanguageRaw.idLabel.value,
    };
    signLanguages[i] = signLanguage;
  }

  // TEMPORARY, WHILE ONLY LSF HAS VIDEOS
  signLanguages = filterArrayBy(signLanguages, "wdQid", "Q99628");
  console.log(signLanguages);
  return signLanguages;
}
function handleMessages(message, sender, sendResponse) {
  // Return early if this message isn't meant for the offscreen document.
  if (message.target !== "offscreen") {
    return false;
  }
  if (message.type !== "getSignLanguagesWithVideos") {
    console.warn(`Unexpected message type received: '${message.type}'.`);
    return;
  }

  // getSignLanguagesWithVideos takes (endpointURL, query), so unpack
  // both values from the message data before calling it.
  const { sparqlEndpointLinguaLibreURL, sparqlSignLanguagesQuery } = message.data;
  getSignLanguagesWithVideos(sparqlEndpointLinguaLibreURL, sparqlSignLanguagesQuery)
    .then((signLanguages) => sendResponse(signLanguages));

  // We need to explicitly return true in our chrome.runtime.onMessage handler
  // in order to allow the requester to handle the request asynchronously.
  return true;
}
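For completeness, the offscreen page that hosts this script would look something like this (a minimal sketch; the jQuery filename is assumed, and it must be bundled with the extension, since MV3 does not allow remotely hosted scripts):

offscreen.html
<!DOCTYPE html>
<html>
  <head>
    <!-- local copy of jQuery shipped with the extension; filename assumed -->
    <script src="jquery.min.js"></script>
    <script src="offscreen.js"></script>
  </head>
</html>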
Using the Fetch API, which works directly in the service worker (so no offscreen document or jQuery is needed), looks something like this:
async function getSignLanguagesWithVideos() {
  try {
    // SPARQL endpoints expect a form-encoded POST (which is what jQuery's
    // $.post sends), not a JSON body, so use URLSearchParams here.
    const response = await fetch(sparqlEndpoints.lingualibre.url, {
      method: "POST",
      body: new URLSearchParams({
        format: "json",
        query: sparqlSignLanguagesQuery,
      }),
    });
    if (!response.ok) {
      throw new Error(`HTTP error! status: ${response.status}`);
    }
    const data = await response.json(); // Await the parsed JSON response

    let signLanguages = []; // `let`, because the filter below reassigns it
    for (let i = 0; i < data.results.bindings.length; i++) {
      const signLanguageRaw = data.results.bindings[i];
      const signLanguage = {
        wdQid: signLanguageRaw.id.value.split("/").pop(),
        labelNative: signLanguageRaw.idLabel.value,
      };
      signLanguages.push(signLanguage);
    }

    // Temporary filtering (assuming filterArrayBy is available)
    signLanguages = filterArrayBy(signLanguages, "wdQid", "Q99628");
    console.log(signLanguages);
    return signLanguages;
  } catch (error) {
    console.error("Error fetching or processing data:", error);
  }
}
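Note that filterArrayBy isn't defined in either snippet; it's assumed to be a small helper along these lines (a sketch of the presumed behavior, not the actual implementation):

function filterArrayBy(array, key, value) {
  // Keep only the items whose `key` property equals `value`
  return array.filter((item) => item[key] === value);
}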