Reasoning

Being able to modify the content or the view, as not everything should be adjusted for a printer.

Process

Packages available

Package Advantage on PDF
Mozilla pdfjs Easy rendering
pdf-lib Easy extracting
Ruksa pdfjs Easy creation
js-pdf Complete but not as powerful

Extracting from the old PDF and modifying it

Read a local file with the FileReader web API

  1. Make a file input button and then process the uploaded file using the FileReader web API.

    1
    <input type="file" id="file-selector" accept=".pdf" onChange={onFileSelected} />
  2. The FileReader API works with callbacks, but async/await can be used with a helper function.

    1
    2
    3
    4
    5
    6
    7
    8
    9
    10
    11
    12
    13
    14
    15
    16
    17
    18
    19
    20
    // Helper: wrap FileReader's callback API in a Promise so it can be awaited.
    // Resolves with an ArrayBuffer of the file's bytes, rejects on read error.
    function readFileAsync(file) {
      return new Promise((resolve, reject) => {
        const reader = new FileReader();
        reader.onload = () => resolve(reader.result);
        reader.onerror = reject;
        reader.readAsArrayBuffer(file);
      });
    }

    // Change handler for the file <input>: read the selected file into an
    // ArrayBuffer (the buffer is consumed by the extraction step later on).
    const onFileSelected = async (e) => {
      const [file] = e.target.files ?? [];
      if (!file) return;
      // store it as an array buffer
      const pdfArrayBuffer = await readFileAsync(file);
    };

Extract PDF file

  1. Get an array with the page numbers of the PDF

    1
    2
    3
    4
    5
    // Build [start-1, ..., end-1]: takes 1-based page numbers and yields
    // the corresponding 0-based indices (hence the trailing -1).
    function range(start, end) {
      const length = end - start + 1;
      const indices = [];
      for (let i = 0; i < length; i++) {
        indices.push(start + i - 1);
      }
      return indices;
    }
  2. Extraction

    1
    2
    3
    4
    5
    6
    7
    8
    9
    10
    11
    12
    13
    import { PDFDocument } from "pdf-lib";

    // Copy a 1-based, inclusive page range from the source PDF into a new PDF.
    // Generalized: the original hard-coded pages 2-3; those remain the defaults
    // so existing single-argument callers behave identically.
    // @param arrayBuff  ArrayBuffer with the source PDF bytes (from the read step)
    // @param firstPage  first page to keep (1-based, default 2)
    // @param lastPage   last page to keep (1-based, default 3)
    // @returns Uint8Array with the bytes of the new PDF
    async function extractPdfPage(arrayBuff, firstPage = 2, lastPage = 3) {
      const pdfSrcDoc = await PDFDocument.load(arrayBuff);
      const pdfNewDoc = await PDFDocument.create();
      // copy only the desired pages (range() converts to 0-based indices)
      const pages = await pdfNewDoc.copyPages(pdfSrcDoc, range(firstPage, lastPage));
      pages.forEach((page) => pdfNewDoc.addPage(page));
      return await pdfNewDoc.save();
    }
    // it returns an Uint8Array

Render the new PDF in the browser

  1. Make a URL out of it and render it inside an iframe.

    1
    2
    3
    4
    5
    6
    7
    // Expose the generated PDF bytes to the <iframe> by turning them into a
    // Blob and storing its object URL in component state.
    function renderPdf(uint8array) {
      const blob = new Blob([uint8array], { type: "application/pdf" });
      setPdfFileData(URL.createObjectURL(blob));
    }
  2. You may use your custom PDF viewer using the pdfjs library as I mentioned above.

Download the new PDF

Use the download function from your browser.

Full code

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
import { useState } from "react";
import { PDFDocument } from "pdf-lib";

export default function Home() {
const [pdfFileData, setPdfFileData] = useState();

function readFileAsync(file) {
return new Promise((resolve, reject) => {
let reader = new FileReader();
reader.onload = () => {
resolve(reader.result);
};
reader.onerror = reject;
reader.readAsArrayBuffer(file);
});
}

function renderPdf(uint8array) {
const tempblob = new Blob([uint8array], {
type: "application/pdf",
});
const docUrl = URL.createObjectURL(tempblob);
setPdfFileData(docUrl);
}

function range(start, end) {
let length = end - start + 1;
return Array.from({ length }, (_, i) => start + i - 1);
}

async function extractPdfPage(arrayBuff) {
const pdfSrcDoc = await PDFDocument.load(arrayBuff);
const pdfNewDoc = await PDFDocument.create();
const pages = await pdfNewDoc.copyPages(pdfSrcDoc, range(2, 3));
pages.forEach((page) => pdfNewDoc.addPage(page));
const newpdf = await pdfNewDoc.save();
return newpdf;
}

// Execute when user select a file
const onFileSelected = async (e) => {
const fileList = e.target.files;
if (fileList?.length > 0) {
const pdfArrayBuffer = await readFileAsync(fileList[0]);
const newPdfDoc = await extractPdfPage(pdfArrayBuffer);
renderPdf(newPdfDoc);
}
};

return (
<>
<h1>Hello world</h1>
<input
type="file"
id="file-selector"
accept=".pdf"
onChange={onFileSelected}
/>
<iframe
style={{ display: "block", width: "100vw", height: "90vh" }}
title="PdfFrame"
src={pdfFileData}
frameborder="0"
type="application/pdf"
></iframe>
</>
);
}

Definition

  • static code analysis tool for infrastructure as code (IaC)
  • scans files looking for misconfigurations that may lead to security (CIS) or compliance problems
  • custom user policies in Python or YAML
  • supports
    • Terraform (for AWS, GCP, Azure and OCI)
    • AWS CloudFormation (including AWS SAM)
    • Azure Resource Manager (ARM)
    • Serverless framework
    • Helm charts
    • Kubernetes
    • Docker

How to install

Install

  • Python package

    1
    pip install checkov
  • VSCode extension requires signing in to Bridgecrew

Config file

Where to find it?

  • looks for a .checkov.yaml or .checkov.yml file in the following places in order of precedence:
    • Directory against which checkov is run. (-d).
    • Current working directory where checkov is called.
    • User’s home directory.

Attention: it is a best practice for checkov configuration file to be loaded from a trusted source composed by a verified identity, so that scanned files, check ids and loaded custom checks are as desired.

  • You can pass in the path to a config file via the command line. In this case, the other config files will be ignored.
    1
    checkov --config-file path/to/config.yaml

How to write it?

  • Use the --create-config command

    1
    2
    3
    4
    5
    6
    7
    8
    9
    10
    # create
    checkov --compact --directory test-dir --docker-image sample-image \
    --dockerfile-path Dockerfile --download-external-modules True \
    --external-checks-dir sample-dir --no-guide --quiet \
    --repo-id bridgecrew/sample-repo --skip-check CKV_DOCKER_3,CKV_DOCKER_2 \
    --skip-fixes --skip-framework dockerfile secrets --skip-suppressions \
    --soft-fail --branch develop --check CKV_DOCKER_1 \
    --create-config /Users/sample/config.yml
    # check
    checkov --show-config
  • The output file will look like this:

    1
    2
    3
    4
    5
    6
    7
    8
    9
    10
    11
    12
    13
    14
    15
    16
    17
    18
    19
    20
    21
    22
    23
    24
    25
    26
    27
    28
    branch: develop
    check:
    - CKV_DOCKER_1
    compact: true
    directory:
    - test-dir
    docker-image: sample-image
    dockerfile-path: Dockerfile
    download-external-modules: true
    evaluate-variables: true
    external-checks-dir:
    - sample-dir
    external-modules-download-path: .external_modules
    framework:
    - all
    no-guide: true
    output: cli
    quiet: true
    repo-id: bridgecrew/sample-repo
    skip-check:
    - CKV_DOCKER_3
    - CKV_DOCKER_2
    skip-fixes: true
    skip-framework:
    - dockerfile
    - secrets
    skip-suppressions: true
    soft-fail: true

Quickstart CLI reference

CLI

1
checkov -d /myFolder
Parameter Description
-h, –help Help
-v, –version Version
-d DIRECTORY, –directory DIRECTORY IaC root directory to scan
-f FILE, –file FILE IaC file to scan
–docker-image DOCKER_IMAGE Scan docker images by name or ID. Requires bridgecrew token
–dockerfile-path DOCKERFILE_PATH Path to the Dockerfile of the scanned docker image
-l, –list List checks
-ca CA_CERTIFICATE, –ca-certificate CA_CERTIFICATE Custom CA certificate (bundle) file [env var: BC_CA_BUNDLE]
–config-file CONFIG_FILE path to the Checkov configuration YAML file
–download-external-modules DOWNLOAD_EXTERNAL_MODULES download external terraform modules from public git repositories and terraform registry

Terraform

  • Terraform plan in json format

    1
    2
    3
    4
    terraform init
    terraform plan -out tf.plan
    terraform show -json tf.plan > tf.json
    checkov -f tf.json
  • Convert json file into multiple lines

    1
    terraform show -json tf.plan | jq '.' > tf.json 

Custom rules

Structure

  • Metadata

    • Policy Name
    • Id: format CKV2_<provider>_<number>
    • Category (GENERAL_SECURITY, LOGGING, ENCRYPTION, NETWORKING, IAM, BACKUP_AND_RECOVERY, CONVENTION, SECRETS, KUBERNETES)
  • Definition

    • Definition Block(s) - Attribute Blocks, Connection State Blocks or both
      • Attribute Blocks: describes resources with a certain configuration.
      • Connection State Blocks - The policy describes resources in a particular Connection state.
    • Logical Operator(s) (optional)

Examples

Basic example

1
2
3
4
5
6
7
8
9
10
11
12
---
metadata:
name: "Check that all resources are tagged with the key - env"
id: "CKV2_AWS_1"
category: "GENERAL_SECURITY"
scope:
provider: aws
definition:
cond_type: "attribute"
resource_types: "all"
attribute: "tags.env"
operator: "exists"

Blocks example

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
---
metadata:
name: "Check that all encrypted RDS clusters are tagged with encrypted: true"
id: "CKV2_AWS_1"
category: "SECRETS"
definition:
and:
- cond_type: "attribute"
resource_types:
- "aws_rds_cluster"
attribute: "tags.encrypted"
operator: "equals"
value: "true"
- or:
- cond_type: "attribute"
resource_types:
- "aws_rds_cluster"
attribute: "kms_key_id"
operator: "exists"
- cond_type: "attribute"
resource_types:
- "aws_rds_cluster"
attribute: "storage_encrypted"
operator: "equals"
value: "true"

Skipping checks

CLI

1
2
3
4
# run only these 2 rules
checkov -d . --check CKV_AWS_20,CKV_AWS_57
# skip rules following a pattern
checkov -d . --skip-check CKV_AWS*

Terraform

1
checkov:skip=<check_id>:<suppression_comment>

Example:

1
2
#checkov:skip=CKV_AWS_20:The bucket is a public static content host
#checkov:skip=CKV_AWS_20:The bucket is a public static content host

Kubernetes

1
checkov.io/skip#: <check_id>=<suppression_comment>

Example:

1
2
3
4
5
6
7
8
9
10
11
apiVersion: v1
kind: Pod
metadata:
name: mypod
annotations:
checkov.io/skip1: CKV_K8S_20=I don't care about Privilege Escalation :-O
checkov.io/skip2: CKV_K8S_14
checkov.io/skip3: CKV_K8S_11=I have not set CPU limits as I want BestEffort QoS
spec:
containers:
...

Create the new npm project

  • Initialize a project

    1
    npm init -y
  • Install dependencies:

    1
    npm i async newman path
  • Result example

    1
    2
    3
    4
    5
    6
    7
    8
    9
    10
    11
    12
    13
    14
    15
    16
    17
    {
    "name": "newman_parallel",
    "version": "1.0.0",
    "description": "",
    "main": "index.js",
    "scripts": {
    "start": "node index.js"
    },
    "keywords": [],
    "author": "",
    "license": "ISC",
    "dependencies": {
    "async": "^3.1.0",
    "newman": "^4.5.6",
    "path": "^0.12.7"
    }
    }

Scripting

Steps

  1. Update the path for your postman collection and environment
  2. Specify the number of concurrent runs you want to launch with the constant PARALLEL_RUN_COUNT
  3. Execute it with npm start

Example script

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
const path = require('path')
const async = require('async')
const newman = require('newman')

// Number of collection runs to launch concurrently.
const PARALLEL_RUN_COUNT = 3

const parametersForTestRun = {
  collection: path.join(__dirname, 'postman/postman_collection.json'),
  environment: path.join(__dirname, 'postman/localhost.postman_environment.json'),
  reporters: 'cli'
};

// One newman run; `done` is the async.parallel completion callback.
// FIX: declared with const — the original bare assignment created an
// implicit global (and throws in strict mode / ES modules).
const parallelCollectionRun = function (done) {
  newman.run(parametersForTestRun, done);
};

const commands = []
for (let index = 0; index < PARALLEL_RUN_COUNT; index++) {
  commands.push(parallelCollectionRun);
}

// Runs the Postman sample collection PARALLEL_RUN_COUNT times, in parallel.
async.parallel(
  commands,
  (err, results) => {
    err && console.error(err);
    results.forEach(function (result) {
      const failures = result.run.failures;
      // FIX 1: backticks are required for ${} interpolation — the original
      // single-quoted string printed the literal text "${result.collection.name}".
      // FIX 2: `failures` is already the failures array; the original
      // `failures.failures` was always undefined.
      console.info(failures.length ? JSON.stringify(failures, null, 2) :
        `${result.collection.name} ran successfully.`);
    });
  });

Prepare the layer

Be Careful!
Having all dependencies in a directory, and not downloading from an index, only works when the directory contains all packages. The directory should therefore contain not only all dependencies but also all packages that those dependencies depend on. You should therefore manually include these in requirements.txt (so that the first step downloads them explicitly), or you should install all packages using PyPI and then run pip freeze > requirements.txt to store the list of all packages needed.

  1. Create a new folder for this project:
1
2
mkdir aws-lambda-layer
cd aws-lambda-layer
  1. Create a folder structure for the modules that need to be installed.
1
mkdir -p lambda-layer/python/lib/python3.8/site-packages
  1. Install modules in that folder. The structure is important, because that is where Python expects to find the modules.
1
pip install requests --target lambda-layer/python/lib/python3.8/site-packages
  1. Go into the lambda-layer folder and create a zip file for the layer. It will be uploaded using the console.
1
2
cd lambda-layer
zip -r9 lambda-layer.zip .

Create the layer

  1. Log into the AWS console and go to Services -> Lambda -> Layers
  2. Click on Create layer
    • Name (e.g.: myRequestsLayer)
    • Upload - Select your zip file from before
    • Runtime (Python 3.8)
  3. Click on Create

Creating the lambda

Manually

Create the code

  1. Log in to the AWS console and go to Services -> Lambda
  2. Click on Create function
    • Author from scratch
    • Function name (e.g.: randomDadJokes)
    • Runtime (Python 3.8)
  3. Click on Create function
  4. Replace the code in the editor with the following code, and hit Save:
1
2
3
4
5
6
7
8
9
import json
import requests

# Public dad-joke API endpoint.
url = 'https://icanhazdadjoke.com'

def lambda_handler(event, context):
    """Fetch a random dad joke and return its text as a string."""
    # Ask the API for JSON explicitly; the default response is plain text.
    response = requests.get(url, headers={"Accept": "application/json"})
    payload = json.loads(response.text)
    return payload['joke']

Connect the layer

  1. On the lambda screen, in the Designer section, click on the Layers box.
    • Add a layer
    • Select from list of runtime compatible layers
    • Name (choose your layer)
    • Version 1
    • Add
  2. Click on the dropdown Select a test event -> Configure test events
    • Add Event Name (e.g.: Test)
    • Inputs don’t matter so you can just leave it as is or delete those keys
    • Click on Create
  3. Run it by clicking on the Test button.

Via Cloud Formation template

  1. Zip the folder that contains the lambda function
1
2
cd  my_lambda_function
zip my-deployment-package.zip lambda_function.py
  1. Upload the zip to an AWS S3 bucket.
  2. Then, the template will look similar to this.
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
AWSTemplateFormatVersion: '2010-09-09'
Description: "This stack creates a Lambda function that gets its code from an
S3 bucket and makes use of 2 different Lambda layers"
Parameters:
LambdaBucket:
Description: S3 URI where the Lambda code zip will be located.
Type: String
Default: accountname_projectname
LambdaDescription:
Description: Description of the Lambda Function.
Type: String
Default: Lambda for tests.
LambdaHandler:
Description: Handler of the Lambda Function.
Type: String
Default: lambda_function.lambda_handler
LambdaMemory:
Description: Memory available to the Lambda Function.
Type: Number
Default: 832
LambdaName:
Description: Name of the Lambda function.
Type: String
Default: TestLambda
LambdaObject:
Description: Name of the zip file that includes the Lambda code.
Type: String
Default: isltest/Lambda_Api.zip
LambdaObjectVersion:
Description: Version of the Lambda code.
Type: String
Default: iegmgNFWyh86SeM8lszZWKxK60ueO4ne
LambdaRole:
Description: IAM role with permissions required to execute the Lambda function.
Type: String
Default: arn:aws:iam::123456789012:role/service-role/ChromelessTest-role-p5jqa0rk
LambdaRuntime:
Description: The programming language in which it will be written.
Type: String
Default: python3.8
LambdaTimeout:
Description: Timeout (in seconds) to execute the Lambda Function.
Type: Number
Default: 63
TracingMode:
Description: Tracing mode for integration with AWS X-Ray if needed.
Type: String
Default: Active
LambdaLayer1:
Description: ARNs of the layers that will be applied to the Lambda function,
in case you need dependencies.
Type: String
Default: arn:aws:lambda:eu-west-1:123456789012:layer:LambdaInsightsExtension:10
LambdaLayer2:
Description: ARNs of the layers that will be applied to the Lambda function,
in case you need dependencies.
Type: String
Default: arn:aws:lambda:eu-west-1:123456789012:layer:selenium-python-dependencies:12
Resources:
LambdaFunction:
Type: 'AWS::Lambda::Function'
Properties:
MemorySize: !Ref LambdaMemory
Description: !Ref LambdaDescription
TracingConfig:
Mode: !Ref TracingMode
Timeout: !Ref LambdaTimeout
Code:
S3ObjectVersion: !Ref LambdaObjectVersion
S3Bucket: !Ref LambdaBucket
S3Key: !Ref LambdaObject
Role: !Ref LambdaRole
Handler: !Ref LambdaHandler
FunctionName: !Ref LambdaName
Runtime: !Ref LambdaRuntime #"python3.8"
PackageType: "Zip"
Layers:
- !Ref LambdaLayer1 #LayerInsights
- !Ref LambdaLayer2 #LayerSeleniumPython

Lighthouse functions

  • Classic navigations
    • Page loads with cold cache
  • New
    • Page loads with a warm cache
    • Pages with an activated Service Worker
    • Accounting for potential user interactions

From recording to measuring UI/UX

Preparation - recording

  1. Get a script user flow (e.g. via Chrome Dev Tools recorder).
  2. Test it works
    1
    2
    3
    4
    5
    6
    # create a node module, non-interactive
    npm init -y
    # install dependencies
    npm install puppeteer lighthouse
    # test that the recording replays correctly
    node demo_user_flow_recording.js

Modify the recording file to use Lighthouse

  • Modify from just replay…

    1
    2
    3
    4
    5
    const puppeteer = require('puppeteer');

    (async () => {
    const browser = await puppeteer.launch();
    const page = await browser.newPage();
  • … into this to measure UI/UX

    1
    2
    3
    4
    5
    6
    7
    8
    9
    10
    11
    12
    13
    14
    15
    16
    17
    18
    19
    20
    21
    22
    23
    24
    25
    26
    27
    28
    29
    30
    31
    32
    33
    34
    35
    36
    37
    38
    39
    40
    41
    42
    43
    // Replay a Chrome DevTools recording while capturing a Lighthouse user flow.
    const puppeteer = require('puppeteer');
    const { startFlow } = require('lighthouse/lighthouse-core/fraggle-rock/api.js');
    const fs = require("fs");

    (async () => {
    const browser = await puppeteer.launch({ headless: false });
    const page = await browser.newPage();

    // Start the Lighthouse flow with desktop screen emulation overrides.
    const flow = await startFlow(page, {
    name: 'Go to homepage',
    configContext: {
    settingsOverrides: {
    screenEmulation: {
    mobile: false,
    width: 1350,
    height: 940,
    deviceScaleFactor: 1,
    disabled: false,
    },
    formFactor: "desktop",
    },
    },
    });

    // update viewport
    // NOTE(review): `targetPage` is only declared inside the block below, so
    // this line references an undeclared identifier and will throw a
    // ReferenceError — it should use `page` (or move after the declaration).
    // NOTE(review): width/height (940x1350) look transposed relative to the
    // screenEmulation settings above (1350x940) — confirm intended values.
    await targetPage.setViewport({ "width": 940, "height": 1350 })

    // add some steps
    {
    // capture data
    await flow.navigate("https://gitlab.com/")

    const targetPage = page;
    const promises = [];
    promises.push(targetPage.waitForNavigation());
    await targetPage.goto('https://gitlab.com/');
    await Promise.all(promises);
    }

    // generate report
    const report = flow.generateReport();
    fs.writeFileSync('report.html', report);
    await browser.close();
    // NOTE(review): snippet is truncated — the closing `})();` of the async
    // IIFE is missing from this excerpt.

Mode examples

  1. Use puppeteer to open the browser.
  2. Start a Lighthouse user flow.
  3. Navigate to the target URL.
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
import fs from 'fs';
import open from 'open';
import puppeteer from 'puppeteer';
import {startFlow} from 'lighthouse/lighthouse-core/fraggle-rock/api.js';

// Audit a single cold navigation with a Lighthouse user flow and open
// the resulting HTML report in the default viewer.
async function captureReport() {
  // Launch a visible browser and create a fresh tab.
  const browser = await puppeteer.launch({headless: false});
  const page = await browser.newPage();

  // Record a user flow around one navigation to the target URL.
  const flow = await startFlow(page, {name: 'Single Navigation'});
  await flow.navigate('https://web.dev/performance-scoring/');

  await browser.close();

  // Persist the flow report and open it.
  const reportHtml = flow.generateReport();
  fs.writeFileSync('flow.report.html', reportHtml);
  open('flow.report.html', {wait: false});
}

captureReport();

Capturing a warm load

  1. Use puppeteer to open the browser.
  2. Start a Lighthouse user flow.
  3. Navigate to the target URL.
  4. Do a second navigation, disabling the clearing of cache and storage that Lighthouse does by default in navigations
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
// Audit the same URL twice: first a cold load, then a warm load where the
// cache/storage clearing Lighthouse performs by default is disabled.
async function captureReport() {
  // open the browser
  const browser = await puppeteer.launch({headless: false});
  const page = await browser.newPage();

  const testUrl = 'https://web.dev/performance-scoring/';
  // start user flow
  const flow = await startFlow(page, {name: 'Cold and warm navigations'});

  // First navigation: default behavior, storage cleared => cold cache.
  await flow.navigate(testUrl, {
    stepName: 'Cold navigation'
  });

  // Second navigation: disableStorageReset keeps the cache warm.
  await flow.navigate(testUrl, {
    stepName: 'Warm navigation',
    configContext: {
      settingsOverrides: {disableStorageReset: true},
    },
  });

  await browser.close();

  const reportHtml = flow.generateReport();
  fs.writeFileSync('flow.report.html', reportHtml);
  open('flow.report.html', {wait: false});
}

captureReport();

Snapshots

  • Audit a single point in time: set up a page and test it in its exact state: (e.g. with a drop-down open or a form partially filled in).
  • Many of the performance metrics are currently defined as beginning with a page load and so are not applicable in a snapshot, but the accessibility audits and many of the performance best practices can still yield important checks.
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
// Take Lighthouse snapshots of squoosh.app at three UI states:
// page loaded, demo image opened, and advanced settings opened.
async function captureReport() {
  const browser = await puppeteer.launch({headless: false});
  const page = await browser.newPage();

  const flow = await startFlow(page, {name: 'Squoosh snapshots'});

  await page.goto('https://squoosh.app/', {waitUntil: 'networkidle0'});

  // Wait for first demo-image button, then open it.
  const demoImageSelector = 'ul[class*="demos"] button';
  await page.waitForSelector(demoImageSelector);
  await flow.snapshot({stepName: 'Page loaded'});
  await page.click(demoImageSelector);

  // Wait for advanced settings button in UI, then open them.
  const advancedSettingsSelector = 'form label[class*="option-reveal"]';
  await page.waitForSelector(advancedSettingsSelector);
  await flow.snapshot({stepName: 'Demo loaded'});
  await page.click(advancedSettingsSelector);

  await flow.snapshot({stepName: 'Advanced settings opened'});

  // FIX: await the close — the original left this promise floating, unlike
  // the other examples, so Chrome could still be shutting down while the
  // report was written.
  await browser.close();

  const report = flow.generateReport();
  fs.writeFileSync('flow.report.html', report);
  open('flow.report.html', {wait: false});
}

captureReport();

Timespans

  • Runs Lighthouse audits over some period of time, which may or may not include a navigation.
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
// Measure layout shift both during a plain navigation and across a
// navigate-then-scroll timespan on the same test page.
async function captureReport() {
  const browser = await puppeteer.launch({headless: false});
  const page = await browser.newPage();
  // CDP session: lets us send raw DevTools Protocol commands to the page.
  const session = await page.target().createCDPSession();

  const testUrl = 'https://pie-charmed-treatment.glitch.me/';
  const flow = await startFlow(page, {name: 'CLS during navigation and on scroll'});

  // Step 1: a regular Lighthouse navigation.
  await flow.navigate(testUrl, {stepName: 'Navigate only'});

  // Step 2: a timespan covering a page load plus user-like scrolling.
  await flow.startTimespan({stepName: 'Navigate and scroll'});
  await page.goto(testUrl, {waitUntil: 'networkidle0'});
  /*
  Puppeteer has no direct "scroll like a user" function, but the DevTools
  Protocol's Input.synthesizeScrollGesture has convenient parameters like
  repetitions and delay to somewhat simulate a more natural scrolling gesture.
  https://chromedevtools.github.io/devtools-protocol/tot/Input/
  (see #method-synthesizeScrollGesture)
  */
  await session.send('Input.synthesizeScrollGesture', {
    x: 100,
    y: 0,
    yDistance: -2500,
    speed: 1000,
    repeatCount: 2,
    repeatDelayMs: 250,
  });
  await flow.endTimespan();

  await browser.close();

  const reportHtml = flow.generateReport();
  fs.writeFileSync('flow.report.html', reportHtml);
  open('flow.report.html', {wait: false});
}

captureReport();
0%