"@builder.io/qwik": "^0.15.2",
+diff --git a/.github/workflows/on-deploy.yml b/.github/workflows/on-deploy.yml index 46d890077..79c45316c 100644 --- a/.github/workflows/on-deploy.yml +++ b/.github/workflows/on-deploy.yml @@ -57,7 +57,7 @@ jobs: with: path: ${{ env.CACHE_DIST_PATH }} key: dist-${{ github.run_id }} - - run: npx nx run valor-software-site:build-scully --prod + - run: npx nx run valor-software-site:build-scully -c stage # firebase deploy firebase_deploy: diff --git a/.github/workflows/on-push-or-pull.yml b/.github/workflows/on-push-or-pull.yml index 4f5f47a96..41b54d707 100644 --- a/.github/workflows/on-push-or-pull.yml +++ b/.github/workflows/on-push-or-pull.yml @@ -59,7 +59,7 @@ jobs: with: path: ${{ env.CACHE_DIST_PATH }} key: dist-${{ github.run_id }} - - run: npx nx run valor-software-site:build-scully --prod + - run: npx nx run valor-software-site:build-scully -c stage # run linting linting: diff --git a/.github/workflows/on_push_to_master.yml b/.github/workflows/on_push_to_master.yml index 8c6d9be4b..cc31a26fb 100644 --- a/.github/workflows/on_push_to_master.yml +++ b/.github/workflows/on_push_to_master.yml @@ -17,7 +17,7 @@ jobs: with: node-version: '16.15.1' - run: yarn install - - run: npx nx run valor-software-site:build-scully --prod + - run: npx nx run valor-software-site:build-scully:production - name: Deploy 🚀 uses: JamesIves/github-pages-deploy-action@v4.3.3 with: diff --git a/apps/valor-software-site/project.json b/apps/valor-software-site/project.json index 8659aecd5..51607c769 100644 --- a/apps/valor-software-site/project.json +++ b/apps/valor-software-site/project.json @@ -10,6 +10,9 @@ "options": { "command": "npx scully --prod --scanRoutes --project=valor-software-site" }, + "configurations": { + "uat": {} + }, "dependsOn": [ { "target": "build", @@ -44,6 +47,12 @@ }, "configurations": { "production": { + "optimization": true, + "sourceMap": false, + "namedChunks": false, + "extractLicenses": true, + "vendorChunk": false, + "buildOptimizer": true, "budgets": [ 
{ "type": "initial", @@ -71,6 +80,32 @@ "extractLicenses": false, "sourceMap": true, "namedChunks": true + }, + "stage": { + "fileReplacements": [ + { + "replace": "apps/valor-software-site/src/environments/environment.ts", + "with": "apps/valor-software-site/src/environments/environment.stage.ts" + } + ], + "optimization": true, + "sourceMap": false, + "namedChunks": false, + "extractLicenses": true, + "vendorChunk": false, + "buildOptimizer": true, + "budgets": [ + { + "type": "initial", + "maximumWarning": "500kb", + "maximumError": "1mb" + }, + { + "type": "anyComponentStyle", + "maximumWarning": "2kb", + "maximumError": "4kb" + } + ] } }, "defaultConfiguration": "production" @@ -83,6 +118,9 @@ }, "development": { "browserTarget": "valor-software-site:build:development" + }, + "stage": { + "browserTarget": "valor-software-site:build:stage" } }, "defaultConfiguration": "development" diff --git a/apps/valor-software-site/src/app/app.module.ts b/apps/valor-software-site/src/app/app.module.ts index d0f9b64aa..bff9315cc 100644 --- a/apps/valor-software-site/src/app/app.module.ts +++ b/apps/valor-software-site/src/app/app.module.ts @@ -9,7 +9,7 @@ import { CommonDocsModule, OLD_ROUTES_FROM_OLD_SITE, PORTFOLIO_LIST, - SeoService + SeoService, } from '@valor-software/common-docs'; import { BrowserAnimationsModule } from '@angular/platform-browser/animations'; import { ScullyLibModule } from '@scullyio/ng-lib'; @@ -38,7 +38,7 @@ import { RECAPTCHA_V3_SITE_KEY, RecaptchaV3Module } from 'ng-recaptcha'; { provide: ARTICLES_REFACTORED_TITLE_LIST, useValue: articlesRefactoringTitlesList }, { provide: PORTFOLIO_LIST, useValue: projectsList }, { provide: OLD_ROUTES_FROM_OLD_SITE, useValue: linksFromOldSite }, - { provide: RECAPTCHA_V3_SITE_KEY, useValue: '6LeXDiUoAAAAABZ2FU4l2GZTJ0v5otDAQkC3UZxs' } + { provide: RECAPTCHA_V3_SITE_KEY, useValue: '6LeXDiUoAAAAABZ2FU4l2GZTJ0v5otDAQkC3UZxs' }, ], bootstrap: [AppComponent], }) diff --git 
a/apps/valor-software-site/src/assets/articles/a-qwik-view-of-the-ranking-bar/A-Qwik-View-of-the-Ranking-Bar.png b/apps/valor-software-site/src/assets/articles/a-qwik-view-of-the-ranking-bar/A-Qwik-View-of-the-Ranking-Bar.png new file mode 100644 index 000000000..5dceea072 Binary files /dev/null and b/apps/valor-software-site/src/assets/articles/a-qwik-view-of-the-ranking-bar/A-Qwik-View-of-the-Ranking-Bar.png differ diff --git a/apps/valor-software-site/src/assets/articles/a-qwik-view-of-the-ranking-bar/Slava_Chub.jpg b/apps/valor-software-site/src/assets/articles/a-qwik-view-of-the-ranking-bar/Slava_Chub.jpg new file mode 100644 index 000000000..a730ee24f Binary files /dev/null and b/apps/valor-software-site/src/assets/articles/a-qwik-view-of-the-ranking-bar/Slava_Chub.jpg differ diff --git a/apps/valor-software-site/src/assets/articles/a-qwik-view-of-the-ranking-bar/a-qwik-view-of-the-ranking-bar.html b/apps/valor-software-site/src/assets/articles/a-qwik-view-of-the-ranking-bar/a-qwik-view-of-the-ranking-bar.html new file mode 100644 index 000000000..8e8990450 --- /dev/null +++ b/apps/valor-software-site/src/assets/articles/a-qwik-view-of-the-ranking-bar/a-qwik-view-of-the-ranking-bar.html @@ -0,0 +1,583 @@ +
This article is a sequel to my previous article. +In this new article, I’ll describe precisely the same task, but I’m going to change common and popular React to something completely different. That’s why I strongly recommend reading the previous article.
+Some time ago, I faced a new framework. I asked myself… Is it a new animal in the Framework Zoo? I have worked in IT for over 25 years and always await a "miracle." It doesn’t matter if it is Frontend, Backend, a new programming language, or DBMS. I’m trying to answer the following question for every new trending app. Will this app become mainstream? On the one hand, I remember a lot of stories such as "Angular," "React," "NodeJS," "Golang," "Postgres," "Microsoft C++," and "FoxPro." On the other hand, I remember another set of stories: "Backbone," "D Programming Language," "Polymer," "OrientDB," "Powersoft PowerBuilder," and "Microsoft Site Server"… I hope you can guess what the difference between these lists is. Of course, I don’t want to belittle software from the second list. But some of the software is destined to be more popular than others.
+So, meet a new framework: Qwik! I am fond of predictions, but I’m not magical. I don’t know if Qwik will grab developers' minds in the near future. Despite that, Qwik looks like a very promising framework. "Framework reimagined for the edge" - tells us the homepage. I like this approach. I like when an author rejects any annoying legacy and starts the project from scratch according to previous experiences. Moreover, performance is a defining trait of Qwik. Sounds exciting!
+Generally, coding on Qwik looks close to React, which allows gaining a lot of developers from the React community, even though the Qwik concept is a bit different. Unlike other frameworks, Qwik is resumable, which means Qwik applications require 0 hydration. This allows Qwik apps to have instant-on interactivity, regardless of size or complexity. Honestly, my article is mainly for React developers. But if you are not a React guy, don’t worry; just dig into the official resources a bit more thoroughly. If you are a React guy and want to start with the practice immediately, the Qwik Components Concept would be very useful. The article’s primary goal is to illustrate how to work with the framework. That’s why I’m not going to provide Qwik technical knowledge as the Official Documentation does. I aim to guide you in Qwik World via links and examples. Unlike the previous article, I used Typescript in my examples because this language is used in Qwik by default. Also, my examples below are not production-ready. That’s why don’t criticize them so much:) Especially for "@ts-ignore."
+There is only one exciting thing I want to tell you looking ahead. My Qwik-based code turned out more elegant than the React-based one! This fact could be an excellent incentive to learn and use Qwik.
+I like Stackblitz as a cloud prototyping tool. It has a lot of different presets such as Angular, React, etc. But it doesn’t have a Qwik preset because this framework is too young. Despite that, I found the following custom starter. +I’m going to explain what you should modify to work with Qwik and D3.
+First, we need to change one dependency in package.json.
+From:
+"@builder.io/qwik": "^0.15.2",
+To:
+"@builder.io/qwik": "^0.16.1",
+Secondly, we need to install the following new dependencies.
+"@types/d3": "7.4.0",
+"d3": "^7.8.0",
+Qwik forces us to follow a particular convention. Please, read the helpful info here.
+Actually, we are talking about more than just Qwik here. In this project, I use Qwik City. We call it a meta-framework for Qwik. Qwik City is to Qwik what Next.js is to React, what Nuxt is to Vue, or SvelteKit to Svelte.
+All my future activities will be related to these conventions.
+Of course, I use Stackblitz here only to illustrate my thoughts interactively. In real life, you need to use another approach for project bootstrapping. Fortunately, Qwik has a perfect bootstrapper. If you want to start a new project, please run the following command.
+npm create qwik@latest
+Please, read Getting Started Qwik
+I prefer to explain more complicated ideas via smaller sequential examples. That’s why before we proceed with Ranking Bars, I’d like to provide a more straightforward example that we will modify to the goal in the future. In the first step, we need to get the app that displays the following information via D3.
+First, you can find the solution below here.
+The app should recalculate and redraw the dimension values for every window size change.
+Let’s remove all content from routes folder and put the following index.tsx instead.
+import { component$ } from "@builder.io/qwik";
+import App from "../components/app";
+
+export default component$(() => <App />);
+Only one index.tsx means that we use only one "root" route.
+Now we need to clean components folder.
+Put app.tsx, which contains the App component, into the components folder.
+import { component$ } from "@builder.io/qwik";
+import Chart from "./chart";
+
+export default component$(() => <Chart />);
+The following file chart.tsx contains Chart component.
+import {
+ component$,
+ useStore,
+ useClientEffect$,
+ useSignal,
+ useOnWindow,
+ useTask$,
+ $,
+} from "@builder.io/qwik";
+import * as d3 from "d3";
+import { setSvgDimension } from "./utils";
+
+export default component$(() => {
+ const store = useStore({ width: 0, height: 0 });
+ const svgRef = useSignal<Element>();
+
+ useClientEffect$(() => {
+ setSvgDimension(svgRef, store);
+ });
+
+ useOnWindow(
+ "resize",
+ $(() => {
+ setSvgDimension(svgRef, store);
+ })
+ );
+
+ useTask$(({ track }: { track: Function }) => {
+ track(() => store.width);
+ track(() => store.height);
+ render(svgRef, store.width, store.height);
+ });
+
+ return <svg class="chart" ref={svgRef} />;
+});
+
+export function render(svgRef: any, width: number, height: number) {
+ d3.select(svgRef.value).select(".dimenstion-text").remove();
+
+ const svg = d3
+ .select(svgRef.value)
+ .append("svg")
+ .attr("width", width)
+ .attr("height", height)
+ .append("g")
+ .attr("transform", "translate(0,0)");
+
+ svg
+ .append("text")
+ .text("Hello Qwik!")
+ .attr("x", 10)
+ .attr("y", 50)
+ .attr("width", 200)
+ .attr("fill", "red");
+
+ svg
+ .append("text")
+ .text(`Width = ${width}px | Height = ${height}px`)
+ .attr("class", "dimenstion-text")
+ .attr("x", 10)
+ .attr("y", 80)
+ .attr("width", 200)
+ .attr("fill", "black");
+}
+Also, you can find setSvgDimension code in utils.ts.
+import { Signal } from "@builder.io/qwik";
+
+export function setSvgDimension(
+ svgRef: Signal<Element | undefined>,
+ store: any
+) {
+ if (svgRef?.value) {
+ const { width, height } = svgRef.value.getBoundingClientRect();
+ store.width = width;
+ store.height = height;
+ }
+}
+Let me comment some important points.
+The component returns SVG, as in the previous article’s example.
+return <svg class="chart" ref={svgRef} />;+
useSignal allows us to work with the element above.
+const svgRef = useSignal<Element>();+
You can find more info regarding useSignal here.
+According to this: Use useClientEffect$() to execute code after the component is resumed. This is useful for setting up timers or streams on the client when the application is resumed.
+In my example, the following code sets component dimensions and puts them in the store.
+useClientEffect$(() => {
+ setSvgDimension(svgRef, store);
+});
+In this case useClientEffect$ behaviour is similar to the following code in React.
+useEffect(() => {
+ // init the component here...
+}, []);
+useOnWindow / useOn() / useOnDocument() are powerful ways to work with related listeners. In the code fragment below, we use useOnWindow to listen to every window size change.
+useOnWindow(
+ "resize",
+ $(() => {
+ setSvgDimension(svgRef, store);
+ })
+);
+You can find more information regarding hooks above here.
+The following line of code demonstrates to us how to store Qwik-trackable variables.
+const store = useStore({ width: 0, height: 0 });+
+The following code allows us to track changes to the related store variables.
+useTask$(({ track }: { track: Function }) => {
+ track(() => store.width);
+ track(() => store.height);
+ // new render when window size has changed
+ render(svgRef, store.width, store.height);
+});
+I’d like to compare useStore and useTask$ with React useState and useEffect hooks. But remember, Qwik is different!
+The main goal of render is to show the component width and height for every window size change.
+Just a reminder: you can find the example above here.
+As I told you at the start, this article is a sequel to my previous article. You can find all related information here. That’s why I want to get and comment my Qwik version of the Ranking Bar right now.
+Traditionally, you can look at the full solution here +Let’s focus on what’s changed…
+app.tsx
+import { component$ } from "@builder.io/qwik";
+import Chart from "./chart";
+
+export const data = {
+ Apple: 100,
+ Apricot: 200,
+ Araza: 5,
+ Avocado: 1,
+ Banana: 150,
+ // ...
+ Feijoa: 11,
+ Fig: 0,
+};
+
+// Just add a new prop "data"
+export default component$(() => <Chart data={data} />);
+utils.ts
+import * as d3 from "d3";
+import { Signal } from "@builder.io/qwik";
+
+// no changes in comparing with the previous article except for typings
+export function dotme(texts: d3.Selection<SVGElement, {}, HTMLElement, any>) {
+ texts.each(function () {
+ // @ts-ignore
+ const text = d3.select(this);
+ const chars = text.text().split("");
+
+ let ellipsis = text.text(" ").append("tspan").text("...");
+ // @ts-ignore
+ const minLimitedTextWidth = ellipsis.node().getComputedTextLength();
+ ellipsis = text.text("").append("tspan").text("...");
+
+ const width =
+ // @ts-ignore
+ parseFloat(text.attr("width")) - ellipsis.node().getComputedTextLength();
+ const numChars = chars.length;
+ const tspan = text.insert("tspan", ":first-child").text(chars.join(""));
+
+ if (width <= minLimitedTextWidth) {
+ tspan.text("");
+ ellipsis.remove();
+ return;
+ }
+
+ // @ts-ignore
+ while (tspan.node().getComputedTextLength() > width && chars.length) {
+ chars.pop();
+ tspan.text(chars.join(""));
+ }
+
+ if (chars.length === numChars) {
+ ellipsis.remove();
+ }
+ });
+}
+
+// add related types
+export interface ChartData {
+ [key: string]: number;
+}
+
+export interface NormalizedChartRecord {
+ fruit: string;
+ value: number;
+ x: number;
+ width: number;
+}
+
+// no changes in comparing with the previous article except for typings
+export function getNormalizedData(
+ data: any,
+ width: number
+): NormalizedChartRecord[] {
+ const tmpData: any[] = [];
+ let total = 0;
+ for (const key of Object.keys(data)) {
+ if (data[key] > 0) {
+ tmpData.push({ fruit: key, value: data[key] });
+ total += data[key];
+ }
+ }
+ tmpData.sort((a, b) => b.value - a.value);
+ let x = 0;
+ for (const record of tmpData) {
+ const percent = (record.value / total) * 100;
+ const barwidth = (width * percent) / 100;
+ record.x = x;
+ record.width = barwidth;
+ x += barwidth;
+ }
+ return tmpData;
+}
+
+export function setSvgDimension(
+ svgRef: Signal<Element | undefined>,
+ store: any
+) {
+ if (svgRef?.value) {
+ const { width, height } = svgRef.value.getBoundingClientRect();
+ store.width = width;
+ store.height = height;
+ }
+}
+And, finally, chart.tsx. Please, read my comments in the code.
+import {
+ component$,
+ useStore,
+ useClientEffect$,
+ useSignal,
+ useOnWindow,
+ useTask$,
+ $,
+} from "@builder.io/qwik";
+import * as d3 from "d3";
+import { ChartData, dotme, getNormalizedData, setSvgDimension } from "./utils";
+
+export interface ChartProps {
+ data: ChartData;
+}
+
+export default component$(({ data }: ChartProps) => {
+ // store width and height of the component here
+ const store = useStore({ width: 0, height: 0 });
+ // control the SVG container
+ const svgRef = useSignal<Element>();
+
+ // initialization
+ useClientEffect$(() => {
+ // update the store
+ setSvgDimension(svgRef, store);
+ });
+
+ // listen window size changes
+ useOnWindow(
+ "resize",
+ $(() => {
+ // update the store
+ setSvgDimension(svgRef, store);
+ })
+ );
+
+ // track width and height
+ useTask$(({ track }: { track: Function }) => {
+ track(() => store.width);
+ track(() => store.height);
+ // alter that, get normalized data
+ const normalizedData = getNormalizedData(data, store.width);
+ // and, finally, render the component according the new screen size
+ render(normalizedData, svgRef, store.width, store.height);
+ });
+
+ return <svg class="chart" ref={svgRef} />;
+});
+
+// the following code is close to the related one in the previous article
+export function render(
+ normalizedData: any,
+ svgRef: any,
+ width: number,
+ height: number
+) {
+ const svg = d3
+ .select(svgRef.value)
+ .append("svg")
+ .attr("width", width)
+ .attr("height", height)
+ .append("g")
+ .attr("transform", "translate(0,0)");
+
+ const color = d3
+ .scaleOrdinal()
+ .domain(Object.keys(normalizedData))
+ .range(d3.schemeTableau10);
+
+ svg
+ .selectAll()
+ .data(normalizedData)
+ .enter()
+ .append("g")
+ .append("rect")
+ .attr("x", (d: any) => d.x)
+ .attr("width", (d: any) => d.width - 1)
+ .attr("y", 0)
+ .attr("height", 50)
+ // @ts-ignore
+ .attr("fill", (_: any, i: number) => color(i));
+
+ svg
+ .selectAll("text")
+ .data(normalizedData)
+ .join("text")
+ .text((d: any) => d.fruit)
+ .attr("x", (d: any) => d.x + 5)
+ .attr("y", (d: any) => 30)
+ .attr("width", (d: any) => d.width - 1)
+ .attr("fill", "white");
+
+ // @ts-ignore
+ svg.selectAll("text").call(dotme);
+}
+Now, let’s run the example and reduce/increase the window size.
+One of the biggest advantages of Angular is the variety of tools and solutions that are brought to developers out of the box. One of them is the @angular/forms package, which brings the solid experience of working with any kind of UI controls. +But have you ever wondered, how exactly this works under the hood? The only thing that needs to be done in order to tie FormControl with, let’s say, a plain input is using a "formControl" binding on the input element, pointing that UI element to the instance of a FormControl.
+<input type="text" [formControl]="ctrl" />
+And voila, everything works.
+But obviously, there should be a component or directive that Angular uses to make everything happen. And that "something" can be found here: Angular brings a set of directives like default_value_accessor.ts, select_control_value_accessor.ts, checkbox_value_accessor.ts, etc. All of them implement the ControlValueAccessor interface, which, according to docs: "Defines an interface that acts as a bridge between the Angular forms API and a native element in the DOM."
+This means any component can be easily defined as a form control by implementing this interface and registering itself as an NG_VALUE_ACCESSOR provider. In practice, it requires you to define 4 methods:
+interface ControlValueAccessor {
+ writeValue(obj: any): void
+ registerOnChange(fn: any): void
+ registerOnTouched(fn: any): void
+ setDisabledState(isDisabled: boolean)?: void
+}
+*although setDisabledState is optional, there’re only a few rare scenarios when it’s indeed not needed
+To understand how exactly everything works, let’s have a look at the very basic counter component:
+<lib-counter [formControl]="counter"></lib-counter>
+<div>Counter Value: {{ counter.value }}</div>
+Here’s the code of the component itself:
+import { ChangeDetectionStrategy, ChangeDetectorRef, Component, forwardRef } from '@angular/core';
+import { ControlValueAccessor, NG_VALUE_ACCESSOR } from '@angular/forms';
+
+const COUNTER_CONTROL_ACCESSOR = {
+ provide: NG_VALUE_ACCESSOR,
+ useExisting: forwardRef(() => CounterControlComponent),
+ multi: true,
+};
+
+@Component({
+ selector: 'lib-counter',
+ template: `
+ <button (click)="down()" [disabled]="disabled">Down</button>
+ {{ value }}
+ <button (click)="up()" [disabled]="disabled">Up</button>
+ `,
+ changeDetection: ChangeDetectionStrategy.OnPush,
+ providers: [COUNTER_CONTROL_ACCESSOR],
+})
+export class CounterControlComponent implements ControlValueAccessor {
+ disabled = false;
+ value = 0;
+
+ protected onTouched: () => void;
+ protected onChange: (value: number) => void;
+
+ constructor(private _cdr: ChangeDetectorRef) {}
+
+ up() {
+ this.setValue(this.value + 1, true);
+ }
+
+ down() {
+ this.setValue(this.value - 1, true);
+ }
+
+ registerOnChange(fn: (value: number) => void) {
+ this.onChange = fn;
+ }
+
+ registerOnTouched(fn: () => void) {
+ this.onTouched = fn;
+ }
+
+ setDisabledState(isDisabled: boolean) {
+ this.disabled = isDisabled;
+ }
+
+ writeValue(value: number) {
+ this.setValue(value, false);
+ this._cdr.markForCheck();
+ }
+
+ protected setValue(value: number, emitEvent: boolean) {
+ const parsed = parseInt(value as any);
+ this.value = isNaN(parsed) ? 0 : parsed;
+ if (emitEvent && this.onChange) {
+ this.onChange(value);
+ this.onTouched();
+ }
+ }
+}
+As you see here we’re implementing 4 methods and providing COUNTER_CONTROL_ACCESSOR. This is needed in order to let Angular know it deals with an instance of a form control.
+So what’s happening with control is:
+Once FormControl is initialised, it invokes writeValue, registerOnChange and registerOnTouched methods on the counter component. This syncs the initial state of the FormControl with our counter and also passes onTouched and onChanged methods into the counter, so it can talk back to the FormControl when the user interacts with it.
When the value is changed, FormControl invokes the writeValue method, so counter updates its internal state without triggering the onChange/onTouched methods.
When the user interacts with our counter, it’s required to not only update the internal state but also notify parent FormControl about this state change, thus onChange/onTouched methods are invoked.
+Although there’s not really a lot going on here, it is worth taking a look at a few important implementation details. And this is actually what this article is about.
+onChange should be only triggered by an internal event!
+It’s important to keep in mind that these methods should only be used to notify FormControl about the change that was triggered in the component internally. In other words, if FormControl changes the value of the component, it should never notify FormControl back about this change. This is a quite common mistake as it won’t break anything at the first glance, instead you’ll be able to notice it by subscribing to valueChanges of the bound FormControl:
+export class AppComponent {
+  readonly animal = new FormControl('rabbit');
+
+  constructor() {
+    this.animal.valueChanges.subscribe(console.log);
+    this.animal.setValue('hare');
+    this.animal.setValue('cat');
+ }
+}
+In the normal scenario, by executing the code above, you will see only 2 logs: 'hare', 'cat'. However, if your writeValue method ends up invoking onChange, you will see doubled console logs in the output: 'hare', 'hare', 'cat', 'cat'.
+Here’s a modified code of CounterComponent where this issue can be seen, when FormControl invokes writeValue we notify it back with the onChange method:
+// ... code of CounterComponent
+writeValue(value: number) {
+ // it's convenient to reuse existing "setValue" method, right?
+ // however, this will lead to the incorrect behavior
+ this.setValue(value);
+ this._cdr.markForCheck();
+}
+
+protected setValue(value: number) {
+ const parsed = parseInt(value as any);
+ this.value = isNaN(parsed) ? 0 : parsed;
+ if (this.onChange) {
+ this.onChange(value);
+ this.onTouched();
+ }
+}
+onChange and onTouched should not always be called together!
+onChange/onTouched methods actually serve completely different purposes. While onChange is used to pass data when a component’s state changed internally, onTouched should be invoked after the user interacts with the component. This doesn’t always mean the component’s value is changed.
+onTouched method is used in 2 cases:
by FormControl to update its touched state
+when you set up your control to use updateOn: 'blur', FormControl uses it to properly identify this blur event to apply the value to itself.
+For the CounterComponent both touch and change events are combined because the only way to interact with it is by clicking the button. However, with other components, the flow will be different. For instance, a plain <input /> element with a tied FormControl (with DefaultValueAccessor under the hood) is expected to be marked as touched when the user interacts with the input even by focusing it. Thus, for this kind of components onTouched emission should be tied to the blur event from the input.
+With the introduction of typed forms, form controls can now either infer a type from the default value or be typed explicitly. There’s an interesting thing, though: if we define a control const control = new FormControl<string>() and then check its type, it will be string | null. And you might wonder: why does the type of this control include null? This is because the control can become null at any time, by calling the reset() method on it. Here’s an example from the Angular docs:
+const control = new FormControl('Hello, world!');
+control.reset();
+console.log(control.value); // null
+Although this becomes quite obvious with typed forms, this behavior was inherent in forms from the very beginning. And while new handy types may catch issues with control’s values, it doesn’t really save you from any issues with nulls inside your CVA. Moreover, since CVA component doesn’t have any control over the form it’s being used within and there’s no way to enforce certain types of control on the form, it’s possible to actually pass literally any value into the control. Hence this value will end up passing into the writeValue, which can potentially break your component.
+Let’s change our CounterComponent as follows:
+// ... code of CounterComponent
+writeValue(value: number) {
+ // it's convenient to reuse existing "setValue" method, right?
+ // however, this will lead to the incorrect behavior
+ this.setValue(value, false);
+ this._cdr.markForCheck();
+}
+
+protected setValue(value: number, emitEvent: boolean) {
+ this.value = value;
+ if (emitEvent && this.onChange) {
+ this.onChange(value);
+ this.onTouched();
+ }
+}
+CounterComponent is too simple to have big issues with null because JavaScript will cast null into 0 (null + 1 = 1), but as you can see component is visually broken after reset() is called. So it’s very important to keep in mind this behavior and implement some value protections for the writeValue method.
+Even if you keep in mind all the potential pitfalls listed above, there’s always a chance something will go wrong due to some change or enhancement in the future. The best way to maintain the valid behavior of a component is to have extensive unit test coverage. However, it might be annoying to write the same list of tests for all CVA components or some use cases can be accidentally left without coverage. So it should be much better to have one unified testing solution, that can keep your components safe.
+And there’s one called ngx-cva-test-suite. It’s a small npm package, that provides an extensive set of test cases, ensuring your custom controls behave as intended. It is designed and tested to work properly with both Jest and Jasmine test runners.
+Among the main features:
+ensures the correct amount of calls for the onChange function (incorrect usage may result in extra emissions of valueChanges of formControl)
+ensures correct triggering of onTouched function (is needed for touched state of the control and updateOn: 'blur' strategy to function properly)
+ensures that no extra emissions are present when control is disabled
+checks for control to be resettable using AbstractControl.reset()
+It is quite easy to be configured, here’s the usage scenario for the CounterComponent we looked into within this article:
+import { runValueAccessorTests } from 'ngx-cva-test-suite';
+import { CounterControlComponent } from './counter.component';
+
+runValueAccessorTests({
+ /** Component, that is being tested */
+ component: CounterControlComponent,
+ /**
+ * All the metadata required for this test to run.
+ * Under the hood calls TestBed.configureTestingModule with provided config.
+ */
+ testModuleMetadata: {
+ declarations: [CounterControlComponent],
+ },
+ /** Whether component is able to track "onBlur" events separately */
+ supportsOnBlur: false,
+ /**
+ * Tests the correctness of an approach that is used to set value in the component,
+ * when the change is internal. It's optional and can be omitted by passing "null"
+ */
+ internalValueChangeSetter: null,
+ /** Function to get the value of a component in a runtime. */
+ getComponentValue: (fixture) => fixture.componentInstance.value,
+ /** When component is reset by FormControl, it should either get a certain default internal value or "null" */
+ resetCustomValue: { value: 0 },
+ /**
+ * This test suite applies up to 3 different values on the component to test different use cases.
+ * Values can be customized using this configuration option.
+ */
+ getValues: () => [1, 2, 3],
+});
+You can learn more about usage examples in the package’s repository or get inspiration by looking at a few CVA components that are placed within the repository here.
+What is this all about? Am I about to spend 5 minutes on another corporate blog article that makes no sense to me and doesn’t even involve something related to my own experiences?
+Well, that’s a resounding No!
+It is about us! If you’re a part of Valor, it’s all about you!
It has been a long road since 2013. We have matured and adapted, we have a shared history and personal stories of all of us. During the maturation, we went through this transitional age, which cost us losing our youthful optimism and losing our Angular fanaticism in favor of a wide spectrum of technologies.
+The realization hit harder than we expected. What we do is not about tooling but rather about what we enable, and it’s always more important than the fact that we had used Angular for that particular project to achieve the firm Valor-quality result.
+We all remember the cozy feeling of slipping into your favorite pair of jeans that overstayed their welcome for a couple of extra years. The same goes for technologies, limiting ourselves to only one tool from the galaxy-wide toolbox puts extra restrictions and limits us in our ability to grow in every direction out there.
+Our dream was so vivid and big, that we went against the famous saying about not putting all eggs in one basket. Our dream was to become the largest Angular hub in Ukraine. Our dream was so solid that we even rented an office for 300 folks in Kharkiv. And it went well for some time. We had our share of success, NgTalks 2018, the first-ever Ukrainian Kharkiv-based Angular conference was a huge blast! We also made some decent talent acquisitions in those years. But all those eggs in a single basket started to crack.
Blocks and restrictions are oftentimes nothing more than an illusion in our heads because human brains tend to oppose change. Once we recognize what’s blocking us – that’s half the work done. This way we understood that great people and professionals are spread all around the world
. And possibly we could find a common language and a common ground to make up a team. We went for it, and voila!! We’re an international team of, in a good way, crazy and passionate professionals!
Chasing personal goals is OK, but it’s only OK if it doesn’t weaken other activities. We realized that we can be the best in Angular out there, but there’s no use in being the best if you’re a company that tries to fit the market with only one way of creating a product. Maturing has a price, and we paid the price in shifting from our blind motivations for technical and professional perfection to a more mature understanding of a need to balance between real things: market demands, our personal goals, and professional perfection.
+And we’ve matured
. Indeed we dropped the chains
of being bound to one city, one country, and one technology to become multinational, multi-tech-stack, guns-blazing experts.
It all came to the fact that the only permanent thing in this world is change, and it’s a great state to be which drives us to move, to leave, to grow. What hasn’t changed in all those years is our approach to work. Here and now we aren’t looking for a new static vector, rather we are looking for a way to embrace the change
and share our perception of it with all of us to ride on the same wave.
We’ve all had our chance to make sure that “it’s neither my headache nor my piece of cake” isn’t working, be it projects or human relationships. We aren’t expecting that something will happen on its own. Life is a series of choices and multiple decisions
when it comes to morality, quality, initiative, indifference, or involvement. So let’s get control over the only thing we have control over – whether to ignore or take action. If we notice that something isn’t working at its best, we act. In a nutshell: “Be the driver for the change you want to achieve and the place where you want to be!”
.
Obviously, we are all linked by our contracts, ClickUp accounts, and daily things. But there’s something bigger that connects us. We’re part of Valor, we’re part of the team driven by the same aspirations and beliefs. Our approach no matter what exactly is unified. We choose to be proactive and involved
, be it with tasks or the people who surround us.
Many things are going on in our lives, within work and beyond. Now, possibly, more stuff than ever before (and definitely more than we’d like to deal with). Some time ago we would meet in Valor’s home-like office where everyone had their cups, friends, pets to pet (three of ‘em), and favorite work spots. There we could talk. We could be emotional. We could interact. Damn, we could learn from each other’s mistakes faster, even! Now we give up to the new circumstances to some extent, still embracing humans which we all are.
+These short talks that we will arrange once in a while, which we hope to have your feedback on, may bring the feeling of presence, the feeling of belonging to something you’re excited about, or just a feeling of a friend’s shoulder nearby
! AND NOPE, this is not at all a small thing to desire. This format is not final, so it might morph into one or more iterations further on, to keep you and us involved.
What are we suggesting? First: ask us anything! We promise you’ll get your answers, because we strive to maintain this big human-like attitude, despite being, well, a company that makes money at the end of the day (who would have thought!).
+The cool thing about all this is that we intend to share our stories
, not just basic corporate-bloggy-success-stories and know-hows. We all had our share of ups and downs, and both of them deserve to be seen and heard because they are not only educational in a professional sense, but entertaining, fun, unfun, or even heartbreaking.
We all learn as we go. It’s not only Junior-level folks who are bound to make mistakes; we are all creatures prone to wrong decisions. In the coming weeks, we will try to gather a cycle of succ- and suck-sess stories from our experience working in Valor.
+Do you have some? – Come tell! There are thousands of kilometers between us, and only one button to push to get in touch.
Stay safe, stay tuned, and see you soon ;)
We often encounter routine processes in our work, but what if we could make them less monotonous and even enjoyable? The answer to this problem is automation, a well-known solution used by many people in various fields.
+In this article, we will explore the design processes that can be automated to save time for other tasks. Let’s take a real example: creating a cover image for a blog post on social media. This may seem simple, but let’s break it down into its components:
+Text title
Background image
Contextual images
Author’s photo
Social media cover dimensions
Image size compression
Final move
The first step in creating a cover image is to have a text title. In most cases, the title is provided. But in case the author is writing the article, the title can be determined based on the content of the article. For example, in this case, the title is "Design processes automation with AI services".
+The next step is to create a background image. This can be a time-consuming and creatively challenging task, especially if it’s done every day. However, by using a service such as Midjourney, the task can be delegated to AI, which will create something cool. All you need to do is to describe the image and Midjourney will generate it for you.
I’ll skip the signup and other initial stages, and start with actual examples of using it.
We need to think about the description here and fill it into the prompt. I used this prompt: “generate a flat, art, technological abstract dark gradient background with a minimalistic pattern, without any text, device, or object in front, just an empty neutral background for a social media post on topic: Design processes automation with AI services”
+Choose one picture of 4 options and download it in Hi-resolution.
+Use it for our article cover background
+The third step is to add some contextual images, such as icons or logos, to make it easier for readers to understand the topic of the article. For this article, the logos of the services used - Figma, letsenhance.io, remove.bg, Thumblisher, TinyCanvas (by TinyPNG), Midjourney, and ChatGPT - will be used.
+The fourth step is to improve the author’s picture. In a perfect world, high-quality pictures with a solid background, good composition, and a human pose and facial expression would be provided to us, but in reality, the picture may need to be improved. To enhance the picture’s resolution and sharpen details, letsenhance.io can be used. The service is simple and intuitive to use, just make sure to choose the proper preset and avoid any text.
+To remove the background from the picture, a remove.bg plugin in Figma can be used instead of the traditional pen clipping in Photoshop.
+Another option is to use a Background Remover tool.
The final step is to create different dimensions of the picture to fit different social media platforms such as LinkedIn, Twitter, and Facebook. This can be done easily with a Figma plugin named Thumblisher. All you need to do is select the social media type and image type, and Thumblisher will generate the picture from the source file.
+With the TinyCanvas plugin, we can compress multiple images in Figma
+We have now completed our task, but what about mentioning ChatGPT? Its value cannot be overestimated, as this entire article was written by this tool. The author only guided it and described what he wanted to receive, demonstrating how powerful this tool is.
+In conclusion, with the help of AI services such as Midjourney, letsenhance.io, remove.bg, and Thumblisher, the process of creating a cover image for a blog post can be streamlined and made more efficient. ChatGPT, a language model, was also used to write this article, highlighting the value of AI in various tasks.
+This article introduces you to a specific but exciting topic and is the sequel to my previous article. If you are keen on Rust integrations, please read Node & Rust: Friendship Forever. The NAPI-rs Way.
I suppose all of you, dear colleagues, work with or at least know about VSCode. Did you ever think about the technologies used in VSCode’s creation? You will probably be surprised if I tell you that VSCode is mainly written in TypeScript. But stop… TypeScript and JavaScript are typical for web or backend-based applications, and VSCode is a standalone UI application. Is it possible to create a JavaScript-based standalone UI application? Yes, it is!
+If we had discussed this topic a couple of months ago, I would have recommended ElectronJS if you were looking for a way to create a standalone Javascript application. Also, I would provide you the following list of popular Electron-based applications.
+Microsoft Teams
+Zoom
+Slack for Desktop
+WordPress for Desktop
+Skype
+Discord
+WhatsApp Desktop
+Postman
+MongoDB Compass
+But the modern IT World does not stand still, and we’ve already had a powerful ElectronJS competitor (it could be its killer in the nearest future, BTW).
If you want to get a brief comparison of Tauri with Electron, please read this article. Goodbye Electron. Hello Tauri will also be helpful if you want to understand Tauri’s pros and some brief technical details.
+There is a brief comparison for my impatient readers.
| Framework | "Front-end"      | "Back-end"         |
| --------- | ---------------- | ------------------ |
| Electron  | Chromium browser | NodeJS             |
| Tauri     | Native Webview   | Rust-compiled code |
One small note regarding the Native Webview mentioned above. You can find ultimate information on this topic here. In a nutshell, Tauri applications use WebKit (the Safari engine) as the HTML renderer on macOS, Microsoft Edge WebView2 on Windows, and WebKitGTK (the Linux port of WebKit) on Linux. Pay attention to the fact that a Tauri application could behave differently on different platforms according to the information above.
What conclusions can we draw from the table above? Tauri is about performance and simplicity! As a developer who spent several years on Electron-related projects, I’m pretty sure NodeJS could be a bottleneck for the following reasons.
+NodeJS is a heavyweight solution with complicated architecture. I mean V8, LibUV with Event Loop, etc.
+NodeJS is not a good choice if we need to implement heavy processes like image, data processing, or complicated math calculations.
+Inter-Process Communication (Electron IPC) is a way of communication between the "Frontend" and "Backend" in Electron. Its functionality is overcomplicated in coding.
+Implementing a multithreading NodeJS-based "Backend" in our Electron-based application could be a nightmare.
+Tauri demolishes all of the cons above for the following reasons.
Rust-compiled code contains only the needed minimum of functionality (without redundant architectural stuff like V8 or LibUV).
+Rust is multithreading-friendly and allows us to get multi-platform implementations.
Rust is full of useful memory-safe mechanisms that prevent developers from making mistakes, and as a result, we get high-quality predictable code.
Rust-compiled code is also more performant than NodeJS-based code.
In my opinion, the pros above are critical for the "Backend." That’s why, for the reasons above, I find the Tauri approach promising.
BTW, if you are not a Rust expert and want to learn something new about Rust multithreading, please read Multi-threading for Impatient Rust Learners.
+Of course, Tauri is something new. Despite this, it has good documentation. There are many interesting articles on this topic, and I recommend the following resources reading or watching.
+Tauri - a video allows us to do the first steps
+Goodbye Electron. Hello Tauri - provides an exciting example including events between "Frontend" and "Backend"
+Tauri - Next.js - explains how to provide NextJS-based "Frontend"
+My objective is to provide you with something new to run and test. I created a Tauri application with NextJS & Ant Design-based "Frontend" with some "Backend" calculations that look heavyweight. This application shows us Progress Bar on a screen, and related "progress" data is prepared on the "Backend" (Rust) side.
+Let’s get started!
+npx create-next-app@latest --use-npm --typescript
+Answer the following questions…
+cd tauri-nextjs-demo
+npm i --save-dev @tauri-apps/cli
+npm i @tauri-apps/api --save
+Update next.config.js
/** @type {import('next').NextConfig} */
const nextConfig = {
  // Strict mode surfaces potential React problems during development.
  reactStrictMode: true,
  // Required to use the NextJS <Image> component in SSG (static export) mode.
  // See https://nextjs.org/docs/messages/export-image-api for other workarounds.
  images: { unoptimized: true },
};

module.exports = nextConfig;
+Update scripts section in package.json
+{
+ "scripts": {
+ "dev": "next dev",
+ "build": "next build",
+ "export": "next export",
+ "start": "next start",
+ "tauri": "tauri",
+ "lint": "next lint"
+ }
+}
+npm run tauri init
+Answer the following questions…
+Answer the following questions…
+src-tauri folder contains our backend part.
+The first bootstrapped version contains a minimal set of functionality. Let’s fix it.
+Please, open src-tauri/src/main.rs and put the following code.
// On Windows release builds, hide the console window that would
// otherwise open alongside the GUI.
#![cfg_attr(
    all(not(debug_assertions), target_os = "windows"),
    windows_subsystem = "windows"
)]
use tauri::Window;
use std::{thread, time};

// Event payload sent to the frontend; serde::Serialize lets Tauri
// serialize it to JSON for the webview.
#[derive(Clone, serde::Serialize)]
struct Payload {
    progress: i16,
}

// Tauri command invoked from the frontend via `invoke("progress_tracker", ...)`.
// Emits a "PROGRESS" event with values 0..=100, one every 100 ms.
#[tauri::command]
async fn progress_tracker(window: Window){
    let mut progress = 0;
    loop {
        // Push the current progress value to all frontend listeners.
        window.emit("PROGRESS", Payload { progress }).unwrap();
        let delay = time::Duration::from_millis(100);
        // NOTE(review): thread::sleep blocks the thread inside an async fn;
        // Tauri runs commands off the main thread so the UI stays responsive,
        // but an async timer would be more idiomatic — confirm before reuse.
        thread::sleep(delay);
        progress += 1;
        if progress > 100 {
            break;
        }
    }
}

fn main() {
    // Register the command so the frontend can invoke it, then start the app.
    tauri::Builder::default()
        .invoke_handler(tauri::generate_handler![progress_tracker])
        .run(tauri::generate_context!())
        .expect("error while running tauri application");
}
+Pay attention to the points below.
+progress_tracker function should be called from the "Frontend" (Typescript) part.
+#[tauri::command] is an attribute that defines the function above as a Javascript-friendly
+window: Window parameter should be passed from the "Frontend" side.
+The loop inside progress_tracker returns a number every 100ms 100 times.
+Pay attention on .invoke_handler(tauri::generate_handler![progress_tracker]) in main function. You must "register" your Frontend-friendly function.
+Also, you need to change tauri.identifier value in src-tauri/tauri.conf.json. Say, to com.buchslava.dev in my case. +After that, change build.beforeBuildCommand value to npm run build && npm run export in the file above. It’s important because in this example we work with NextJS SSG.
+Let’s move to our "Frontend" part.
+Move to the project’s root folder and put the following code into src/pages/index.tsx
// Tauri bindings: `invoke` calls a Rust #[tauri::command]; `listen`
// subscribes to events emitted by the Rust side.
import { invoke } from "@tauri-apps/api/tauri";
import { listen } from "@tauri-apps/api/event";
import { useEffect, useState } from "react";

// Payload shape of the "PROGRESS" event emitted by the Rust backend.
interface ProgressEventPayload {
  progress: number;
}

// Envelope Tauri wraps around each event payload.
interface ProgressEventProps {
  payload: ProgressEventPayload;
}

export default function Home() {
  // True while the Rust `progress_tracker` command is running;
  // used to hide the start button during a run.
  const [busy, setBusy] = useState<boolean>(false);

  useEffect(() => {
    // Listen to what the Rust part can tell us: log every "PROGRESS"
    // value to the console. `listen` resolves to an unsubscribe function.
    const unListen = listen("PROGRESS", (e: ProgressEventProps) => {
      console.log(e.payload.progress);
    });

    return () => {
      // Unsubscribe on unmount once the listener promise has resolved.
      unListen.then((f) => f());
    };
  }, []);

  return (
    <div>
      {!busy && (
        <button
          onClick={() => {
            setBusy(true);
            // NOTE(review): the 1 s delay before starting appears to be
            // demo pacing only — confirm before changing.
            setTimeout(async () => {
              // Dynamic import: the module touches `window`, which is not
              // available during Next.js prerendering — TODO confirm.
              const { appWindow } = await import("@tauri-apps/api/window");
              // call Rust function, pass the window
              await invoke("progress_tracker", {
                window: appWindow,
              });
              setBusy(false);
            }, 1000);
          }}
        >
          Start Progress
        </button>
      )}
    </div>
  );
}
+It’s time to run the example…
+npm run tauri dev
+Let’s open Developer Console (Right click on the screen → Inspect → Switch to Console tab) and press "Start Progress" button.
Congrats! We finished the basic Tauri stuff and it’s time to focus on "Frontend" upgrading.
+You can find this solution here.
+We need to add a Progress Bar widget to the screen and show the progress on it instead of Console. +First, install Ant Design dependency.
+npm i antd --save
+Second, remove all content from src/styles/Home.module.css. +Third, put the following content into src/styles/globals.css.
/* Make the whole viewport a flex container that centers the app
   content both horizontally and vertically; hide vertical overflow. */
body {
  position: relative;
  width: 100vw;
  height: 100vh;
  font-family: sans-serif;
  overflow-y: hidden;
  display: flex;
  justify-content: center;
  align-items: center;
}
Fourth, put the following code into src/pages/index.tsx instead of the existing one.
// Tauri bindings: `invoke` calls a Rust #[tauri::command]; `listen`
// subscribes to events emitted by the Rust side.
import { invoke } from "@tauri-apps/api/tauri";
import { listen } from "@tauri-apps/api/event";
import { useEffect, useState } from "react";
import { Button, Progress } from "antd";

// Payload shape of the "PROGRESS" event emitted by the Rust backend.
interface ProgressEventPayload {
  progress: number;
}

// Envelope Tauri wraps around each event payload.
interface ProgressEventProps {
  payload: ProgressEventPayload;
}

export default function Home() {
  // True while the Rust `progress_tracker` command is running.
  const [busy, setBusy] = useState<boolean>(false);
  // Latest progress value (0-100) received from the backend.
  const [progress, setProgress] = useState<number>(0);

  useEffect(() => {
    // Feed each "PROGRESS" event from Rust into component state so the
    // Ant Design <Progress> bar re-renders.
    const unListen = listen("PROGRESS", (e: ProgressEventProps) => {
      setProgress(e.payload.progress);
    });

    return () => {
      // Unsubscribe on unmount once the listener promise has resolved.
      unListen.then((f) => f());
    };
  }, []);

  return (
    <div>
      <div style={{ width: "70vw" }}>
        <Progress percent={progress} />
      </div>
      <Button
        type="primary"
        disabled={busy}
        onClick={() => {
          setBusy(true);
          // NOTE(review): the 1 s delay appears to be demo pacing only.
          setTimeout(async () => {
            // Dynamic import: the module touches `window`, which is not
            // available during Next.js prerendering — TODO confirm.
            const { appWindow } = await import("@tauri-apps/api/window");
            await invoke("progress_tracker", {
              window: appWindow,
            });
            setBusy(false);
          }, 1000);
        }}
      >
        Start Progress
      </Button>
    </div>
  );
}
+Let’s look at the result…
+npm run tauri dev
+Looks good. But I’m a suspicious guy, and I must be 100% sure that everything between Rust and NextJS parts stays together. I want to add a timer to the "Frontend" screen. As a result, Progress and Timer should work simultaneously without stops.
Let’s put the following code into src/pages/index.tsx instead of the existing one.
// Tauri bindings: `invoke` calls a Rust #[tauri::command]; `listen`
// subscribes to events emitted by the Rust side.
import { invoke } from "@tauri-apps/api/tauri";
import { listen } from "@tauri-apps/api/event";
import { useEffect, useState } from "react";
import { Button, Progress } from "antd";

// Payload shape of the "PROGRESS" event emitted by the Rust backend.
interface ProgressEventPayload {
  progress: number;
}

// Envelope Tauri wraps around each event payload.
interface ProgressEventProps {
  payload: ProgressEventPayload;
}

export default function Home() {
  // True while the Rust `progress_tracker` command is running.
  const [busy, setBusy] = useState<boolean>(false);
  // Latest progress value (0-100) received from the backend.
  const [progress, setProgress] = useState<number>(0);
  // Wall-clock label, refreshed every second to show the UI stays live
  // while the Rust command runs.
  const [timeLabel, setTimeLabel] = useState<string>();

  useEffect(() => {
    // Tick the on-screen clock once per second.
    const timeIntervalId = setInterval(() => {
      setTimeLabel(new Date().toLocaleTimeString());
    }, 1000);
    // Feed each "PROGRESS" event from Rust into component state.
    const unListen = listen("PROGRESS", (e: ProgressEventProps) => {
      setProgress(e.payload.progress);
    });

    return () => {
      // Stop the clock and unsubscribe from backend events on unmount.
      clearInterval(timeIntervalId);
      unListen.then((f) => f());
    };
  }, []);

  return (
    <div>
      <div style={{ position: "fixed", top: 20, left: 20 }}>{timeLabel}</div>
      <div style={{ width: "70vw" }}>
        <Progress percent={progress} />
      </div>
      <Button
        type="primary"
        disabled={busy}
        onClick={() => {
          setBusy(true);
          // NOTE(review): the 1 s delay appears to be demo pacing only.
          setTimeout(async () => {
            // Dynamic import: the module touches `window`, which is not
            // available during Next.js prerendering — TODO confirm.
            const { appWindow } = await import("@tauri-apps/api/window");
            await invoke("progress_tracker", {
              window: appWindow,
            });
            setBusy(false);
          }, 1000);
        }}
      >
        Start Progress
      </Button>
    </div>
  );
}
+It’s time to make the last stitch. Till we have progress functionality, we need to stop it somehow. The following modifications allow us to do it.
+src-tauri/src/main.rs
// On Windows release builds, hide the console window that would
// otherwise open alongside the GUI.
#![cfg_attr(
    all(not(debug_assertions), target_os = "windows"),
    windows_subsystem = "windows"
)]
use tauri::Window;
use std::{thread, time};
use std::sync::{Arc, RwLock};

// Event payload sent to the frontend; serde::Serialize lets Tauri
// serialize it to JSON for the webview.
#[derive(Clone, serde::Serialize)]
struct Payload {
    progress: i16,
}

// Emits a "PROGRESS" event (values 0..=100, one every 100 ms) until the
// count completes or the frontend emits a "STOP" event.
#[tauri::command]
async fn progress_tracker(window: Window){
    // New code
    // Shared stop flag. The "STOP" handler runs on another thread, hence
    // Arc<RwLock<_>>; `once` means the handler fires at most one time.
    let stop = Arc::new(RwLock::new(false));
    let stop_clone = Arc::clone(&stop);
    let handler = window.once("STOP", move |_| *stop_clone.write().unwrap() = true);
    // / New code

    let mut progress = 0;
    loop {
        // New code
        // Bail out as soon as the frontend has asked us to stop.
        if *stop.read().unwrap() {
            break;
        }
        // / New code
        window.emit("PROGRESS", Payload { progress }).unwrap();
        let delay = time::Duration::from_millis(100);
        // NOTE(review): blocking sleep inside an async fn — works because
        // Tauri runs commands off the main thread; confirm before reuse.
        thread::sleep(delay);
        progress += 1;
        if progress > 100 {
            break;
        }
    }
    // Detach the "STOP" listener in case it never fired.
    window.unlisten(handler); // New code
}

fn main() {
    // Register the command for frontend invocation, then start the app.
    tauri::Builder::default()
        .invoke_handler(tauri::generate_handler![progress_tracker])
        .run(tauri::generate_context!())
        .expect("error while running tauri application");
}
+src/pages/index.tsx
// Tauri bindings: `invoke` calls a Rust #[tauri::command]; `listen`
// subscribes to events emitted by the Rust side.
import { invoke } from "@tauri-apps/api/tauri";
import { listen } from "@tauri-apps/api/event";
import { useEffect, useState } from "react";
import { Button, Progress } from "antd";

// Payload shape of the "PROGRESS" event emitted by the Rust backend.
interface ProgressEventPayload {
  progress: number;
}

// Envelope Tauri wraps around each event payload.
interface ProgressEventProps {
  payload: ProgressEventPayload;
}

export default function Home() {
  // True while the Rust `progress_tracker` command is running.
  const [busy, setBusy] = useState<boolean>(false);
  // Latest progress value (0-100) received from the backend.
  const [progress, setProgress] = useState<number>(0);
  // Wall-clock label, refreshed every second to show the UI stays live.
  const [timeLabel, setTimeLabel] = useState<string>();

  useEffect(() => {
    // Tick the on-screen clock once per second.
    const timeIntervalId = setInterval(() => {
      setTimeLabel(new Date().toLocaleTimeString());
    }, 1000);
    // Feed each "PROGRESS" event from Rust into component state.
    const unListen = listen("PROGRESS", (e: ProgressEventProps) => {
      setProgress(e.payload.progress);
    });

    return () => {
      // Stop the clock and unsubscribe from backend events on unmount.
      clearInterval(timeIntervalId);
      unListen.then((f) => f());
    };
  }, []);

  return (
    <div>
      <div style={{ position: "fixed", top: 20, left: 20 }}>{timeLabel}</div>
      <div style={{ width: "70vw" }}>
        <Progress percent={progress} />
      </div>
      <Button
        type="primary"
        disabled={busy}
        onClick={() => {
          setBusy(true);
          // NOTE(review): the 1 s delay appears to be demo pacing only.
          setTimeout(async () => {
            // Dynamic import: the module touches `window`, which is not
            // available during Next.js prerendering — TODO confirm.
            const { appWindow } = await import("@tauri-apps/api/window");
            await invoke("progress_tracker", {
              window: appWindow,
            });
            setBusy(false);
          }, 1000);
        }}
      >
        Start Progress
      </Button>
      {/* New code */}
      <Button
        type="primary"
        disabled={!busy}
        onClick={async () => {
          // Ask the Rust loop to break by emitting the "STOP" event it
          // subscribed to with `window.once`.
          const { appWindow } = await import("@tauri-apps/api/window");
          await appWindow.emit("STOP");
          // NOTE(review): PROGRESS events already in flight may arrive
          // after this reset and briefly overwrite the 0 — verify.
          setProgress(0);
          setBusy(false);
        }}
      >
        Stop Progress
      </Button>
      {/* / New code */}
    </div>
  );
}
+Frontend Backend Communication in Tauri: Implementing Progress Bars and Interrupt Button will tell you more regarding the technique above.
+You can find the related source here.
+Finally, I want to focus on build stuff. Let’s build the app. BTW, I’m working under MacOS. Please, read this one if you want to get more about Tauri build. Let’s build!
+npm run tauri build
+The next information will help you understand where and what you can find regarding the result of the build. You can find your build in /src-tauri/target/release/bundle.
+In MacOS, you will find the standalone application /src-tauri/target/release/bundle/macos with the installer-based build /src-tauri/target/release/bundle/dmg.
+The most exciting thing here is the 4.7Mb application and 2.3Mb installer. Can you believe it? 4.7Mb of Rust & NextJS & Ant Design!
+Do you want to compare Tauri’s result with Electron’s one???
+Honestly, when I got this result, my memories from my past returned. I remember 20mb hard disks and IBM PC XT.
+I also thought about the following. Amazing! I can put an application from 2023 to my PC from 1990. Sounds like a time machine!
+PS: Thanks to Eduardo Speroni for helpful notes that improve the article.
Python provides a range of tools and libraries for concurrent and asynchronous programming, designed to cater to various needs and levels of complexity. It is crucial to have an understanding of these tools in order to develop applications that are both efficient and responsive. Now let’s delve into the details of these tools.
+Introduction to Multithreading +Multithreading allows concurrent execution of multiple threads within a single process. Threads share the same memory space, making it suitable for I/O bound tasks.
+Threading Module +The threading module facilitates working with threads in Python. Let’s create a simple example to demonstrate multithreading:
import threading


def print_numbers():
    """Print the labels 'Number 1' through 'Number 5' to stdout."""
    for value in range(1, 6):
        print(f"Number {value}")


def print_letters():
    """Print the labels 'Letter a' through 'Letter e' to stdout."""
    for char in 'abcde':
        print(f"Letter {char}")


if __name__ == "__main__":
    # Run both printers concurrently; the interleaving of their output
    # is decided by the OS thread scheduler, not by this code.
    workers = [
        threading.Thread(target=print_numbers),
        threading.Thread(target=print_letters),
    ]
    for worker in workers:
        worker.start()
    for worker in workers:
        # Block until each thread has finished before exiting.
        worker.join()


# Possible output (ordering is non-deterministic):
# Number 1
# Letter a
# Letter b
# Number 2
# Number 3
# Number 4
# Number 5
# Letter c
# Letter d
# Letter e
+It’s important to understand that while you have two threads (thread1 and thread2) that are executing concurrently, the order in which they execute their tasks is not guaranteed. This lack of ordering is due to the nature of threading and how operating systems schedule threads for execution. Here’s why you see the output in a seemingly random order:
+Thread Scheduling: The operating system’s thread scheduler determines when and in what order threads run. Threads can be preempted and paused at any time, and the scheduler decides which thread to execute next based on factors like thread priorities and time slices.
+Non-Atomic Print Operation: The print operation itself is not atomic, meaning it’s not a single, uninterrupted action. When you print something, it involves multiple steps like acquiring the console output lock, formatting the output, and releasing the lock. Between these steps, other threads can run, which can result in interleaved output.
+For example, consider this possible order of execution:
+thread1 starts and prints "Number 1".
+thread2 starts and prints "Letter a" and "Letter b".
+thread1 continues and prints "Number 2", "Number 3", "Number 4", "Number 5".
+thread2 continues and prints "Letter c", "Letter d", "Letter e".
+In this example, we’ll use multithreading to download files concurrently from different URLs:
import threading
import requests

def download_file(url, filename):
    """Fetch `url` and write the response body to `filename`, then log it.

    NOTE(review): no error handling — a failed request crashes its thread.
    """
    response = requests.get(url)
    with open(filename, "wb") as file:
        file.write(response.content)
    # Fixed: the format string previously printed a literal placeholder
    # instead of the URL (the sample output below shows the intended form).
    print(f"Downloaded {url}")

if __name__ == "__main__":
    urls = [
        "https://github.com/wkhtmltopdf/wkhtmltopdf/archive/refs/tags/0.12.6.zip",
        "https://github.com/wkhtmltopdf/wkhtmltopdf/archive/refs/tags/0.12.2.1.zip",
        "https://github.com/wkhtmltopdf/wkhtmltopdf/archive/refs/tags/0.12.3.1.zip",
    ]
    # One thread per URL: the work is I/O-bound, so the downloads can
    # overlap while each thread waits on the network.
    threads = []
    for i, url in enumerate(urls):
        thread = threading.Thread(target=download_file, args=(url, f"file_{i}.txt"))
        thread.start()
        threads.append(thread)

    # Wait for every download to finish before exiting.
    for thread in threads:
        thread.join()


# Completion order depends on network timing:
# Downloaded https://github.com/wkhtmltopdf/wkhtmltopdf/archive/refs/tags/0.12.3.1.zip
# Downloaded https://github.com/wkhtmltopdf/wkhtmltopdf/archive/refs/tags/0.12.2.1.zip
# Downloaded https://github.com/wkhtmltopdf/wkhtmltopdf/archive/refs/tags/0.12.6.zip
+If you observed that downloading files without using threads (i.e., in a sequential or synchronous manner) took less time than downloading them with threads, there are a few potential reasons for this counterintuitive behavior:
+Global Interpreter Lock (GIL): Python has a Global Interpreter Lock (GIL) that allows only one thread to execute Python bytecode at a time, even on multi-core processors. This means that in a multithreaded Python program, threads can be limited by the GIL, especially if the tasks involve CPU bound operations. In the case of downloading files, which is typically I/O bound (waiting for data to be transferred over the network), using threads may not provide a significant advantage, and it could even introduce some overhead.
+Network Bound: If the download speed is limited by the network bandwidth, using multiple threads might not lead to a significant improvement because the bottleneck is the network speed, not the CPU. In such cases, the overhead of managing multiple threads can outweigh any potential gains.
+Thread Overhead: Creating and managing threads in Python comes with some overhead. If the tasks are relatively simple, such as downloading files, the overhead of creating and managing threads can outweigh the benefits of concurrency.
+Resource Contention: When using threads, there can be contention for system resources like CPU and memory. If the system becomes saturated with threads, context switching and resource contention may slow down the overall performance.
+Thread Management: The example code provided for multithreading may not be optimized for maximum concurrency. In a real-world scenario, optimizing thread management, such as using thread pools or asyncio for I/O bound operations, can yield better results.
+Introduction to Multiprocessing +Multiprocessing allows parallel execution of multiple processes, each with its own memory space. It’s suitable for CPU-bound tasks.
+Multiprocessing Module +The multiprocessing module supports multiprocessing in Python. Let’s create an example demonstrating multiprocessing:
import multiprocessing


def worker(number):
    """Square `number` and print the result as 'Result: <n*n>'."""
    squared = number * number
    print(f"Result: {squared}")


if __name__ == "__main__":
    # Launch one process per input value; each runs `worker` in its own
    # interpreter, so completion (and print) order is not guaranteed.
    jobs = []
    for value in range(1, 6):
        job = multiprocessing.Process(target=worker, args=(value,))
        jobs.append(job)
        job.start()

    # Wait for every child process to exit.
    for job in jobs:
        job.join()


# Possible output (ordering is non-deterministic):
# Result: 4
# Result: 9
# Result: 1
# Result: 25
# Result: 16
+Explanation: In this multiprocessing example, we’re creating multiple processes to perform a CPU-bound task, which is calculating the square of a number. However, the order in which the results are printed may not necessarily match the order of the input values.
+This is because the individual processes run concurrently and independently of each other. They may complete their tasks in a different order, depending on factors like the CPU’s availability and scheduling. As a result, the printed results can appear in a random or unordered fashion.
+Multiprocessing is ideal for parallelizing CPU bound tasks to leverage multiple CPU cores effectively. However, it doesn’t guarantee a specific order of execution or results, as the processes run in parallel and their completion times can vary.
+Introduction to Asynchronous I/O +Asynchronous I/O enables non-blocking concurrency. The event loop manages tasks, making it suitable for I/O-bound operations.
+Asyncio Module +The asyncio module provides tools for asynchronous programming. Let’s create an example to illustrate asyncio:
import asyncio

async def print_numbers():
    # Print "Number 1".."Number 5", suspending for one second after each
    # line so the event loop can run other tasks.
    for i in range(1, 6):
        print(f"Number {i}")
        await asyncio.sleep(1)

async def print_letters():
    # Print "Letter a".."Letter e" with the same one-second suspensions.
    for letter in 'abcde':
        print(f"Letter {letter}")
        await asyncio.sleep(1)

async def main():
    # Schedule both coroutines as tasks; they begin running concurrently
    # on the event loop as soon as they are created.
    task1 = asyncio.create_task(print_numbers())
    task2 = asyncio.create_task(print_letters())

    # Await completion of both. This does not serialize them — both
    # tasks are already in flight, so their output interleaves (below).
    await task1
    await task2

if __name__ == "__main__":
    asyncio.run(main())



# Number 1
# Letter a
# Number 2
# Letter b
# Number 3
# Letter c
# Number 4
# Letter d
# Number 5
# Letter e
+In the code, you are using asyncio to create two asynchronous tasks (print_numbers and print_letters) and running them concurrently. Each task includes an await asyncio.sleep(1) statement, which effectively suspends the execution of the task for 1 second before continuing.
+The main coroutine is executed when you run the program.
+Inside main, you create two tasks: task1 (for print_numbers) and task2 (for print_letters). These tasks are started concurrently.
+task1 starts executing the print_numbers coroutine. It prints "Number 1" and then hits the await asyncio.sleep(1) line. While it sleeps, the event loop continues.
+Simultaneously, task2 starts executing the print_letters coroutine. It prints "Letter a" and then awaits for 1 second.
+After 1 second, task1 resumes execution, printing "Number 2" and then sleeping again.
+task2 also resumes after 1 second, printing "Letter b" and then sleeping.
+This pattern continues until both task1 and task2 have completed their respective loops. The await statements within each coroutine introduce pauses, allowing the other task to make progress while the first one sleeps.
+As a result, you see an interleaved output where "Number" and "Letter" lines are mixed together because both tasks are running concurrently and asynchronously, each yielding to the event loop during the await asyncio.sleep(1) calls. This allows for a more responsive and non-blocking execution of tasks in an event-driven manner, which is one of the key benefits of asyncio.
+In this article we have introduced the principles of programming in Python using three primary methods: multithreading, multiprocessing and asyncio. We have discussed how each approach caters to scenarios, such as improving tasks that involve input/output operations handling intensive tasks or efficiently managing network operations. As you continue your journey into the world of programming in Python these fundamental techniques will serve as tools for creating responsive and high performing applications.
+In our articles of the "Mastering Asynchronous Programming, in Python" series we will delve deeper into strategies, best practices and real world applications of these asynchronous Python programming techniques. Whether you want to optimize web scraping processes, develop web services or improve data processing pipelines, our comprehensive series will provide you with the knowledge and skills needed to excel in the field of Python development. Stay tuned for content and practical examples that will help take your Python programming skills to new heights.
+At Valor Software, we are steadfast in our belief in the transformative power of collaboration and the open-source community. As partners in Module Federation, we have recently ramped up our Developer Relations (DevRel) initiatives to elevate awareness surrounding Module Federation and its ongoing development. In this article, we present an inspiring account of our collaboration with an external contributor, Fyodorov Andrei, who effectively implemented a plugin for Storybook that supports Module Federation remote containers.
+As a company committed to Module Federation, we at Valor Software acknowledge the critical role that collaboration and learning play in driving progress. This narrative revolves around Andrei’s journey, motivated by a passion for learning and a desire to contribute to the community. He joined forces with us and embarked on a developmental journey that encompassed numerous conversations with our team and the Nx team, and eventually led to a beautifully crafted result.
+I started by researching how to modify the Webpack configuration for Storybook. I discovered the webpackFinal method and storybook-module-federation utility that is used to update storybook configuration, and realized I could create a Storybook add-on to achieve my goal. After learning how to create an addon, similar to the ones Module Federation already had, I began the implementation and created a small project to evaluate my solution.
+The first issue I encountered indicated that shared dependencies could not be consumed. After investigating, I found that I needed to apply an asynchronous boundary with the help of VirtualModulesPlugin. Since Storybook already had this plugin in its Webpack configuration, I had to take all virtual modules, write them physically in node modules, and create a new virtual module to apply the asynchronous boundary.
+Upon achieving preliminary results, I turned my attention to integrating my solution within an existing project. As the project utilized the Nx framework, I had to familiarize myself with creating a package therein. After figuring out the process, I successfully moved my solution to the universe repository.
+Having relocated the solution, I generated a pull request and proceeded with the implementation. In response to my initial review, I was tasked with incorporating examples into the project. However, I encountered difficulties with the applications produced by Nx. Further scrutiny revealed that Nx had altered Webpack and returned the configuration as a module, which was incompatible with Storybook.
+To address this issue, I developed utils that supplanted Nx utils. Subsequently, I submitted a pull request to the Nx repository, seeking the simplification of certain utils. Following its approval, I updated the Nx version and performed the requisite refactoring.
+My dedication to learning and enriching the community, bolstered by the invaluable support and guidance offered by the Valor Software and Nx teams, culminated in a triumphant collaboration. This partnership serves to underscore the extraordinary possibilities inherent within the open-source community and the immense potential that arises from collaboration grounded in a collective aspiration for innovation and advancement.
+By uniting our efforts, we can continue to break new ground, unlock novel discoveries, and contribute to a vibrant, perpetually expanding ecosystem of knowledge and development.
+This story started in 1991 when I had just finished high school and began studying at Kharkiv Polytechnic University. During my first encounter with an IBM PC, I came across this beautiful game, and since then, I can’t get it out of my head. I want to tell you about the Columns game. This game is a perfect example of an arcade game. Although it has many different implementations, including the Sega version, my favorite one is Nathan Meyers’ 1989 MS-DOS port. I’m keen on its color scheme, minimalism, and aesthetics.
+I’m a person who follows traditions, and this game has become one of them. Whenever an interesting UI-related technology emerges or when I begin using one, I implement my Columns solution using that particular technology. For example, in 2007, I created the Adobe Flash version, and in 2019, I provided a Flutter-based one.
+But let’s stop digging into the history and focus on today.
+The Qwik web framework was released in 2021 and, in my mind, became one of the most ambitious competitors in the UI area. A few months ago, I provided an article regarding Qwik.
+However, in retrospect, I realize that I didn’t provide as much information as I intended to. The best way to disclose an essential topic is to provide a real-life example. And then, I remembered my old tradition and decided that the Web-based Columns game could be a great option if I want to improve my Qwik skills and tell this story to you, my dear reader.
+Looking ahead, I’m glad to introduce you to Qwik Columns!
+Most of my friends compare this game with Tetris. At first glance, it really looks like Tetris. But this game is a bit different because it forces you to think differently than you do it playing Tetris. The main point is that colors are valuable in Columns. I’ll explain the rules a bit later.
+I guess you noticed "Episode 01" in the title. This topic, frankly, could not be represented as an article due to the number of points to be covered. That’s why I decided to split my telling into chunks and provide you with episodes. Each episode will focus on a subtopic, including Gameplay Architecture, Animation, Vector graphics via the D3 library, and, of course, Qwik. The current episode is just an introduction.
+There is a rectangle-based area that contains squares. It will be empty (white) before playing. A shape (actor) consists of three vertically arranged squares with different colors. It moves down until possible; that is, it reaches the bottom of the area or is faced with the next cell as a colored one. After, the related cells become colored according to the actor’s colors. And then, a new actor appears at the top of the area. There is a checking process before the new actor appears. If colored squares are three or more equal colors on horizontal, vertical, and diagonal, then they should be demolished with increasing scores, and gaps will be collapsed. These are the original rules. Please look at the illustrations below.
+Diagonal
+Vertical and Horizontal
+Checking and collapsing works till all of the same-colored verticals, horizontals, and diagonals are demolished.
+But it would not be me if I implemented the rules as they are. I decided to add a couple of extra points.
+1. If a square containing 4 squares appears, this shape should +be demolished.
+2. If a square containing 2 + 2 squares like the one below appears, this shape should be demolished.
+Let’s enjoy the game!
+As I mentioned earlier, this article is the start of the episodes that reveal the exciting game creation process. Step by step, we will reach the final solution!
+In the next episode, I will explain how to bootstrap the project and make the first scratches via Qwik and D3.
+I introduced you to the general Columns Game idea in the previous episode. And now, it’s time to move forward. This episode will explain how to start the game implementation on Qwik. The first steps will include the following points.
+Bootstrapping.
+The first steps in gameplay.
+But before, I want to speculate a bit regarding the Web implementation of an Arcade Game and focus on one strategic topic. First, we must understand what issues to consider to implement the game successfully. They are the following:
+Library or Framework. In this case, we talk about Qwik, as I explained in Episode 01.
+We need to choose an approach for graphical objects drawing. Of course, we can use a pure HTML canvas-based approach. But let’s not reinvent the wheel; let’s wear it out.
+It is also essential to determine which CSS framework we will use. In my mind, pure CSS usage is also not a good idea for the reason explained in the previous point.
+It’s time to start our implementation with the points above covering.
+I recommend Getting Started Qwikly before we start.
+Please, run
+npm create qwik@latest
+and answer the following questions as:
+Where would you like to create your new project?
+./qwik-columns
+Select a starter
+Basic App
+Would you like to install npm dependencies?
+Yes
+Initialize a new git repository?
+Yes or No, it’s your choice…
+Congratulations! We just successfully started bootstrapping.
+cd qwik-columns
+Let’s return to the points above regarding Graphical Drawing and CSS.
+Before my first serious encounter with D3, I thought this beautiful library was just about charts. But after implementing different custom charts, I found this to be a delusion. One of the main pros of D3 is its universality. In the following episodes, I’ll prove it! Together we will implement the game via D3!
+Currently, I don’t see any alternative to the D3 drawing approach because:
+It has powerful shape-drawing tools and color management.
+D3 is about vector graphics; also, this library is animation-friendly.
+It allows associating (or binding) data to the graphical representation.
+It has a lot of patterns, like different, even tricky chart types.
+D3 is well-documented and has a vast community.
+Let’s install D3 dependencies.
+npm i d3 --save
+Also, we need to install expected typings
+npm i @types/d3 --save-dev
+Let’s switch to CSS issue.
+The relationships between me and CSS are very pragmatic because I’m a full-stack guy and always don’t have much time for sophisticated dancing with CSS. Of course, I understand and respect the beauty of BEM and some other methodologies. But I’m simultaneously thinking about architecture and design, the beauty of JS, TS, Go, Rust code, DB structure, etc. Moreover, some existing CSS-based implementations make me suffer and waste my time. A good example is Node-sass and Node-gyp Errors. That’s why I expect something powerful, easy, and lightweight. According to the following points, I think Tailwindcss is the best choice.
+Control Over Styling
+Faster CSS Styling Process
+Responsiveness and Security
+Additional Features. Tailwind CSS works in the front end of a website. For this reason, it is reasonable for developers to demand ultimate responsiveness. Well, Tailwind provides the ability to create responsive themes for your web applications and remove all unused CSS classes. With PurgeCSS, Tailwind helps you keep your final CSS as small as possible.
+Of course, it has some cons.
+Styling and HTML are Mixed
+Lack of Important Components
+It Takes Time to Learn
+Let’s analyze them. The first couple is only relevant when considering big or even enterprise projects. In that case, we could think about another framework. Regarding the third point, I agree only partially because Tailwind is well-documented, and you can also find many examples and valuable existing approaches. Our current project is not so large, and we can 100% use Tailwind, which saves us time and nerves.
+Fortunately, Qwik is super friendly with Tailwind!
+Please, follow this documentation.
+Let’s do it.
+npm run qwik add tailwind
+Answer "Yes." That’s it. Feel free to use Tailwind in the project!
+It’s time to think about our first steps regarding gameplay. I want to take my time with this and pass it through the following steps.
+Make a working canvas.
+Implement an elementary moving shape, say, a square.
+Make the shape above also movable via keyboard events.
+These steps will efficiently introduce you to the future tricky gameplay.
+Create a new folder: src/components/game-play
+Put utils.ts there.
+import type { Signal } from "@builder.io/qwik";
+import type { MainStore } from "./game";
+
+export function setSvgDimension(
+ svgRef: Signal<Element | undefined>,
+ store: MainStore
+) {
+ if (svgRef?.value) {
+ const { width, height } = svgRef.value.getBoundingClientRect();
+
+ store.width = width;
+ store.height = height;
+ }
+}
+The main goal of setSvgDimension is to keep the width and height of a component in the Qwik store. The component is represented by svgRef as a Qwik Signal. Please, look at a couple of links above if Qwik is something new to you.
+Put the following content into src/routes/index.tsx
+import { component$ } from "@builder.io/qwik";
+import Game from "../components/game-play/game";
+
+export default component$(() => {
+ return <Game />;
+});
+Remove src/components/starter folder. Also, remove all files except src/routes/index.tsx from src/routes/. And finally, put game.tsx file into src/components/game-play.
+game.tsx
+import type { Signal } from "@builder.io/qwik";
+import { useVisibleTask$ } from "@builder.io/qwik";
+import {
+ component$,
+ useStore,
+ useSignal,
+ useOnDocument,
+ useOnWindow,
+ $,
+} from "@builder.io/qwik";
+import * as d3 from "d3";
+import { setSvgDimension } from "./utils";
+
+export function render(
+ svgRef: Signal<Element | undefined>,
+ width: number,
+ height: number,
+ x: number,
+ y: number
+) {
+ if (!svgRef.value) {
+ return;
+ }
+
+ const svg = d3.select(svgRef.value);
+ svg.selectAll("*").remove();
+ svg
+ .append("svg")
+ .attr("width", width)
+ .attr("height", height)
+ .append("g")
+ .attr("transform", "translate(0,0)");
+
+ svg
+ .append("g")
+ .append("rect")
+ .attr("x", 0)
+ .attr("width", width)
+ .attr("y", 0)
+ .attr("height", height)
+ // @ts-ignore
+ .attr("fill", () => d3.color("#ffffff"));
+
+ const data = [{ x, y }];
+
+ svg
+ .selectAll()
+ .data(data)
+ .enter()
+ .append("g")
+ .append("rect")
+ .attr("x", (d) => d.x)
+ .attr("width", 15)
+ .attr("y", (d) => d.y)
+ .attr("height", 15)
+ // @ts-ignore
+ .attr("fill", () => d3.color("#ff0000"));
+}
+
+export interface MainStore {
+ width: number;
+ height: number;
+ horPos: number;
+ vertPos: number;
+}
+
+export default component$(() => {
+ const store = useStore<MainStore>({
+ width: 0,
+ height: 0,
+ horPos: 200,
+ vertPos: 0,
+ });
+ const containerRef = useSignal<Element>();
+ const svgRef = useSignal<Element>();
+
+ useOnWindow(
+ "resize",
+ $(() => {
+ setSvgDimension(containerRef, store);
+ })
+ );
+
+ useOnDocument(
+ "keypress",
+ $((event) => {
+ const keyEvent = event as KeyboardEvent;
+ if (keyEvent.code === "KeyA") {
+ store.horPos -= 10;
+ } else if (keyEvent.code === "KeyD") {
+ store.horPos += 10;
+ }
+ })
+ );
+
+ useVisibleTask$(({ cleanup }: { cleanup: Function }) => {
+ setSvgDimension(containerRef, store);
+ const intervalId = setInterval(() => {
+ store.vertPos += 10;
+ render(svgRef, store.width, store.height, store.horPos, store.vertPos);
+ }, 700);
+ cleanup(() => clearInterval(intervalId));
+ });
+
+ return (
+ <div class="flex justify-center w-screen h-screen pt-5" ref={containerRef}>
+ <svg
+ class="game-area"
+ width={store.width}
+ height={store.height}
+ ref={svgRef}
+ />
+ </div>
+ );
+});
+It’s time to run the solution: npm start.
+As we can see here, there is a moving square. You can move it left via the "A" key and right via "D."
+If you want to dig into this code immediately, please use this source.
+<div class="flex justify-center w-screen h-screen pt-5" ref={containerRef}>
+ <svg
+ class="game-area"
+ width={store.width}
+ height={store.height}
+ ref={svgRef}
+ />
+</div>
+We use SVG as a game board container. It will be represented as a Qwik Signal.
+++++Use useSignal() to create a reactive signal (a form of state). The useSignal() takes an initial value and returns a reactive signal.
+++The reactive signal returned by useSignal() consists of an object with a single property .value. If you change the value property of the signal, any component that depends on it will be updated automatically.
+
+We use containerRef as data associated with the root element. BTW, pay attention to the Tailwind-based 'class.' The aim of containerRef is to keep the dimensions of the screen.
+const containerRef = useSignal<Element>();
+const svgRef = useSignal<Element>();
+The heart of the component is a store.
+export interface MainStore {
+ width: number;
+ height: number;
+ horPos: number;
+ vertPos: number;
+}
+const store = useStore<MainStore>({
+ width: 0,
+ height: 0,
+ horPos: 200,
+ vertPos: 0,
+});
+We keep the following data:
+Width and height of the component (root component dimension equals to the root’s)
+Horizontal and vertical positions of the square
+useVisibleTask$ hook is very important in Qwik because
+++++Use useVisibleTask$() to execute code after the component is resumed. This is useful for setting up timers or streams on the client when the application is resumed.
+
+useVisibleTask$(({ cleanup }: { cleanup: Function }) => {
+ setSvgDimension(containerRef, store);
+ const intervalId = setInterval(() => {
+ store.vertPos += 10;
+ render(svgRef, store.width, store.height, store.horPos, store.vertPos);
+ }, 700);
+ cleanup(() => clearInterval(intervalId));
+});
+There are two main activities above.
+Set the root component/board dimension: setSvgDimension(containerRef, store);
+Provide a main loop of the game via setInterval.
+Let’s dig into the main loop. Moving down is represented by store.vertPos += 10;.
+We just add 10 pixels every 700 milliseconds.
+After, we should render the board and the square.
+render function literally does the following things.
+It takes the SVG-based component representation svgRef.value and binds it with svg variable via d3.select method. Clears all previous stuff in the SVG if it does exist.
+const svg = d3.select(svgRef.value);
+svg.selectAll("*").remove();
+Draws a white rectangle due to the screen dimension. BTW, the following link will be useful.
+svg
+ .append("svg")
+ .attr("width", width)
+ .attr("height", height)
+ .append("g")
+ .attr("transform", "translate(0,0)");
+
+svg
+ .append("g")
+ .append("rect")
+ .attr("x", 0)
+ .attr("width", width)
+ .attr("y", 0)
+ .attr("height", height)
+ // @ts-ignore
+ .attr("fill", () => d3.color("#ffffff"));
+Associate the SVG-based component with x and y (just one element) and draw it.
+const data = [{ x, y }];
+
+svg
+ .selectAll()
+ .data(data)
+ .enter()
+ .append("g")
+ .append("rect")
+ .attr("x", (d) => d.x)
+ .attr("width", 15)
+ .attr("y", (d) => d.y)
+ .attr("height", 15)
+ // red
+ .attr("fill", () => d3.color("#ff0000"));
+++++Use useOn() / useOnDocument() / useOnWindow() to programmatically set up listeners on host elements. This is useful when you are creating custom APIs and don’t have access to place these events in the JSX or if the events are not known ahead of time, such as if they are created based on component props.
+
+The following code allows us to resize the component during window resizing.
+useOnWindow(
+ "resize",
+ $(() => {
+ setSvgDimension(containerRef, store);
+ })
+);
+The following code allows us to process keyboard events. In this example, a user moves the square left when the "A" key has been pressed and right if the "D" key has been pressed.
+useOnDocument(
+ "keypress",
+ $((event) => {
+ const keyEvent = event as KeyboardEvent;
+ if (keyEvent.code === "KeyA") {
+ store.horPos -= 10;
+ } else if (keyEvent.code === "KeyD") {
+ store.horPos += 10;
+ }
+ })
+);
+It’s time to build the solution.
+npm run build
+It seems we passed this step successfully. But please pay attention to the text at the bottom:
+Missing an integration
+
+・ Use npm run qwik add to add an integration
+・ Use npm run preview to preview the build
+As a result, you don’t see index.html in the dist folder. Let’s fix this issue.
+If you want to make this solution web-friendly, please run:
+npm run qwik add
+What integration would you like to add?
+Adapter: Static site (.html files)
+Ready to apply the static updates to your app?
+Yes looks good, finish update!
+Let’s build the solution again…
+And now we should see the expected index.html.
+Please, read Qwik Deployments if you need to learn more about Qwik providers.
+I hope this episode was useful and informative. Feel free to read and run this solution. But it is just preparation before the actual gameplay implementation. The next episode will disclose all the main secrets about the game! See you!
+I’m glad to see you in Game-n-Qwik Episode 03. I appreciate your patience! In the past episode, I explained what steps we should pass before implementing actual gameplay. I want to highlight some critical steps we passed there.
+Bootstrapping. Qwik. Tailwind. D3.
+First scratches regarding gameplay.
+Just to be sure that we are on the same page now, I’d like to recommend returning to the source of the last episode, running it, and reading the explanation.
+In a nutshell, we stopped at a simple example with a big white canvas and a red square that moves down. Also, the user can move it left and right via the keyboard. This example allows us to move forward. Please use this source if you want to pass all future steps manually. For my impatient audience, I’d like to provide the final version of this episode’s code.
+Let’s move forward. Our aim, for now, is to provide the game’s basic functionality. Traditionally, I’ll give the code before and comment afterward.
+First, let’s change src/components/game-play/utils.ts to…
+import type { Signal } from "@builder.io/qwik";
+import type { MainStore } from "./game";
+
+export function setSvgDimension(
+ svgRef: Signal<Element | undefined>,
+ store: MainStore
+) {
+ if (svgRef?.value) {
+ const { height } = svgRef.value.getBoundingClientRect();
+ const rows = store.game.board.length;
+ const columns = store.game.board[0].length;
+
+ const newHeight = (height * 5) / 6;
+ const blockSize = newHeight / rows;
+ const newWidth = blockSize * columns;
+
+ store.width = newWidth;
+ store.height = newHeight;
+ store.blockSize = blockSize;
+ }
+}
+In the last version of the game, board dimensions were equal to the root component’s dimensions. In this case, we need to provide trickier logic.
+The height of the game’s canvas should be 5/6 of the root height
+const newHeight = (height * 5) / 6;
+The width should relate to height and the horizontal number of the board’s cells. That’s why we must detect the cell size using the following formula.
+const blockSize = newHeight / rows;
+One note regarding the board’s grid. As I explained in Episode 01, the game is represented by a grid-based canvas. Of course, the main attributes of each grid are rows and columns numbers. You can look at the initial state of the grid here.
+export const COLOR_WHITE = "#ffffff";
+
+const w = COLOR_WHITE;
+// .......
+export const initData: ColumnsColor[][] = [
+ [w, w, w, w, w, w, w],
+ // .......
+ [w, w, w, w, w, w, w],
+];
+According to the above information, we can calculate our columns and rows quantities using the following formulas.
+const columnsQty = initData[0].length;
+const rowsQty = initData.length;
+or
+const columnsQty = board[0].length;
+const rowsQty = board.length;
+This is a good example.
+And calculate the width.
+const newWidth = blockSize * columns;
+And finally, we can update the Qwik store. I’ll focus on it a bit later.
+store.width = newWidth;
+store.height = newHeight;
+store.blockSize = blockSize;
+That’s it for setSvgDimension.
+The most important and complicated part of the game is this one. I’ll break my tradition and ask you to put this file as it is into src/components/game-play without the source code provided here. Also, I want to explain only some of the code because explaining all of it would be too dull. You can dig into it on your own. The reason is quite simple. This file contains all of the logic details. Despite that, I’ll provide you with all exported functions and explain what they do because they are used in our Qwik part.
+One of the basic definitions regarding gameplay is a Phase. The thing is that each game is a process that has one state per time. The following enum describes all possible states of our Columns. Please, read carefully the comments in the code below.
+export enum Phase {
+ INACTIVE, // Has not started or stopped
+ PAUSED, // On pause
+ MOVING, // An actor moves down
+ MATCH_REQUEST, // Checking process, see Episode 01
+ COLLAPSE_REQUEST, // Calls collapsing process after checking, see Episode 01
+}
+The second important definition is regarding the Actor. Please, read carefully the comments in the code below.
+export interface Actor {
+ state: ColumnsColor[]; // The actor's state is represented by 3 colored cells
+ column: number; // Actor's horizontal position
+ row: number; // Actor's vertical position
+}
+The third and most important definition is the Game. The following interface describes all information about our Columns. Please, read carefully the comments in the code below.
+export interface Game {
+ board: ColumnsColor[][]; // Current board state as a two-dimensional array (horizontal x vertical)
+ actor: Actor; // The actor; described above
+ phase: Phase; // Current phase; described above
+ nextActor: ColumnsColor[]; // We also need to display the next actor that's why we define it here
+ score: number; // Current score as a number
+}
+I’d like to recommend you refresh your knowledge about the gameplay before you browse the following table. Please, look through Episode 01.
+The following calculations as a set of functions are directly related to the gameplay.
+The name of the function |
+Purpose |
+
+ | Conducts checking process |
+
+ | Conducts collapsing process |
+
+ | Answers, if the next actor’s moving down possible? |
+
+ | Finalizes current actor’s moving down if it’s impossible due to |
+
+ | Checks, is the game finished? |
+
+ | Swaps colors in the actor |
+
+ | Init a new game; resets a state of the game. |
+
+ | Answers is the actor empty? |
+
+ | Answers, is the next actor colored? |
+
+ | Provides an array of random colors |
+
+ | Bumps the new actor’s state due to the next one and init the new next state after that. |
+
+ | Moves the actor down |
+
+ | Moves the actor one cell left |
+
+ | Moves the actor N cells left |
+
+ | Moves the actor one cell right |
+
+ | Moves the actor N cells right |
+
+Please, don’t be scared by the list above. We will use all these functions, and I’ll describe how exactly.
+And now, I’m happy to give you the most exciting part of the code when we gather all previous knowledge: I’ve grouped all of them into the following features.
+The store.
+Gameplay utilization functions.
+The heart of the game.
+All our future activities will be around the source of the game 1. The related destination is placed in the source of the game 2. All my future reasonings will reveal the source of the game transformation from state 1 to 2.
+Let’s get started!
+Let’s focus on the store definition.
+export interface MainStore {
+ // Canvas width
+ width: number;
+ // Canvas height
+ height: number;
+ // We should keep the current state of the game. See "The Main Definition" above.
+ game: Game;
+ // We need to keep the size of the cell because it relates to the screen size
+ blockSize: number;
+ // The following field is an indicator of the finished game. I'll focus on it a bit later.
+ gameOverPopup: boolean;
+}
+And now, we are ready for the store initialization. I hope it helps you ultimately understand the declarative part of the game.
+const store = useStore<MainStore>({
+ // Initially, we don't know about the screen dimensions
+ width: 0,
+ height: 0,
+ // The initial state of the game
+ game: {
+ // The current board should be a clone of initData
+ board: [...initData],
+ actor: {
+ // The current actor should be a clone of initActor
+ state: [...initActor],
+ // start actor's column (horizontal position) should be ±at the middle of the canvas
+ column: Math.floor(initData[0].length / 2),
+ // start actor's row (vertical position) should be outside the canvas
+ row: -2,
+ },
+ // Let's set Phase.MOVING
+ phase: Phase.MOVING,
+ // Set "nextActor" as a randomly generated
+ nextActor: randomColors(3),
+ score: 0,
+ },
+ // Initially, we don't know about the screen dimensions that's why blockSize = 0
+ blockSize: 0,
+ // The game is not over yet
+ gameOverPopup: false,
+});
+Let me introduce you to the "workhorse" of the game. The feature is that all UI stuff here is encapsulated in one function. Please, look at the render function and read carefully the comments there.
+export function render(
+ game: Game, // pass the state of the game; I'll focus on it a bit later
+ svgRef: Signal<Element | undefined>, // we also need the SVG representation
+ width: number, // the shapes; see also "setSvgDimension"
+ height: number,
+ blockSize: number
+) {
+ if (!svgRef.value) {
+ return;
+ }
+
+ // Associate our SVG with D3; read also, https://github.com/d3/d3-selection
+ const svg = d3.select(svgRef.value);
+ // Remove all existing content from the SVG if it present.
+ // The reason is that all render calls should fully refresh the screen
+ svg.selectAll("*").remove();
+ // Append the root SVG element
+ svg
+ .append("svg")
+ .attr("width", width)
+ .attr("height", height)
+ .append("g")
+ .attr("transform", "translate(0,0)");
+ // Draw a white rectangle over ALL canvas
+ svg
+ .append("g")
+ .append("rect")
+ .attr("x", 0)
+ .attr("width", width)
+ .attr("y", 0)
+ .attr("height", height)
+ // @ts-ignore
+ .attr("fill", () => d3.color(COLOR_WHITE));
+
+ // Prepare board cells data by the game state (board).
+ // The data is represented by an array of objects.
+ // Each object contains
+ // x (horizontal coordinate of the cell),
+ // y (vertical coordinate of the cell)
+ // value - cell's color
+ const data = [];
+ for (let i = 0, x = 0, y = 0; i < game.board.length; i++) {
+ x = 0;
+ for (let j = 0; j < game.board[i].length; j++) {
+ data.push({ x, y, value: game.board[i][j] });
+ x += blockSize;
+ }
+ y += blockSize;
+ }
+
+ // Draw the current board
+ svg
+ .selectAll()
+ // Data binding allows us to draw each cell of the board
+ // It's a kind of declarative loop.
+ .data(data.filter((d) => d.value !== COLOR_WHITE))
+ .enter()
+ .append("g")
+ .append("rect")
+ // x is taken from the data; see data preparation above
+ .attr("x", (d) => d.x)
+ .attr("width", blockSize)
+ // y is taken from the data; see data preparation above
+ .attr("y", (d) => d.y)
+ .attr("height", blockSize)
+ // color/value is taken from the data; see data preparation above
+ // it represented by a hex value as a string and finally
+ // it should be transformed to the D3 compatible color: "d3.color(d.value)"
+ // @ts-ignore
+ .attr("fill", (d) => d3.color(d.value))
+ .attr("stroke", "#000000")
+ .attr("stroke-width", 1);
+
+  // We need to draw the actor only if the actor is moving (Phase.MOVING)
+ if (game.phase === Phase.MOVING) {
+ // Actor data preparation
+ const actorData = [];
+ for (let i = 0; i < game.actor.state.length; i++) {
+ actorData.push({
+ x: game.actor.column * blockSize,
+ y: (game.actor.row + i - 1) * blockSize,
+ value: game.actor.state[i],
+ });
+ }
+
+ // Draw the actor according to the data above
+ // similarly to "Draw the current board" above
+ svg
+ .selectAll()
+ .data(actorData)
+ .enter()
+ .append("g")
+ .append("rect")
+ .attr("class", "could-fly")
+ .attr("x", (d) => d.x)
+ .attr("width", blockSize)
+ .attr("y", (d) => d.y)
+ .attr("height", blockSize)
+ // @ts-ignore
+ .attr("fill", (d) => d3.color(d.value))
+ .attr("stroke", "#000000")
+ .attr("stroke-width", 1);
+ }
+}
+The following functions are responsible for the user’s actions.
+// Just call the render
+const reRender = $(() => {
+ render(store.game, svgRef, store.width, store.height, store.blockSize);
+});
+// Move left action handler
+const doLeft = $(() => {
+ // if the actor is moving then call "moveLeft" (see Exports above) and re-render
+ if (store.game.phase === Phase.MOVING) {
+ moveLeft(store.game);
+ reRender();
+ }
+});
+// Move right action handler
+const doRight = $(() => {
+ // if the actor is moving then call "moveRight" (see Exports above) and re-render
+ if (store.game.phase === Phase.MOVING) {
+ moveRight(store.game);
+ reRender();
+ }
+});
+// Swap colors in the actor action handler
+const doSwap = $(() => {
+ if (store.game.phase === Phase.MOVING) {
+ // if the actor is moving then call "swapActorColors" (see Exports above) and re-render
+ swapActorColors(store.game);
+ reRender();
+ }
+});
+An important note regarding Qwik +I suppose you just noticed that the functions above are wrapped by $(). It could look odd to someone unfamiliar with Qwik. I want to explain the reason for the $() usage because it's key to understanding one of Qwik's most essential concepts.
+Let’s remove $() from, say, reRender and run the app… We will see the error below.
+const reRender = () => {
+ render(store.game, svgRef, store.width, store.height, store.blockSize);
+};
+Captured variable in the closure can not be serialized because it's a function named "reRender". You might need to convert it to a QRL using $(fn):
+
+const reRender = $(()=>{
+
+ render(store.game, svgRef, store.width, store.height, store.blockSize);
+ });
+
+Please check out https://qwik.builder.io/docs/advanced/qrl/ for more information.
+If we refactor the function above the following way
+function reRender() {
+ render(store.game, svgRef, store.width, store.height, store.blockSize);
+}
+then we also get an error.
+Seems like you are referencing "reRender" inside a different scope ($), when this happens, Qwik needs to serialize the value, however it is a function, which is not serializable.
+Did you mean to wrap it in `$()`?
+
+The cause of the errors above is Qwik's architecture. The fact is that Qwik is uniquely focused on performance, and I recommend that you dig into the QRL idea.
+
+QRL (Qwik URL) is a particular form of URL that Qwik uses to lazy load content.
+In this context, I recommend you read the following brief but eloquent explanations of Qwik fundamentals.
+In short, we must wrap our functions by $() to make them Qwik-friendly!
+Let’s dig into the heart of the game. I used a heart as an allegory because our game behaves like a living thing. In our case, we are talking about a 700-millisecond interval that drives the gameplay. I intentionally don't write comments in this code because it needs no extra explanation. Just read and enjoy it!
+useVisibleTask$(({ cleanup }: { cleanup: Function }) => {
+ setSvgDimension(containerRef, store);
+ const intervalId = setInterval(() => {
+ const game = store.game;
+
+ if (game.phase === Phase.MOVING) {
+ if (isNextMovePossible(game)) {
+ actorDown(game);
+ } else {
+ endActorSession(game);
+ if (isFinish(game)) {
+ game.phase = Phase.INACTIVE;
+ store.gameOverPopup = true;
+ } else {
+ game.phase = Phase.MATCH_REQUEST;
+ }
+ }
+ } else if (game.phase === Phase.MATCH_REQUEST) {
+ const matched = matching(game, true);
+ if (matched) {
+ game.phase = Phase.COLLAPSE_REQUEST;
+ } else {
+ doNextActor(game);
+ game.phase = Phase.MOVING;
+ }
+ } else if (game.phase === Phase.COLLAPSE_REQUEST) {
+ collapse(game);
+ game.phase = Phase.MATCH_REQUEST;
+ }
+
+ reRender();
+ }, 700);
+ cleanup(() => clearInterval(intervalId));
+});
+And finally, there is a small stitch regarding the finished game criteria.
+useTask$(({ track }: { track: Function }) => {
+ track(() => store.gameOverPopup);
+
+ if (store.gameOverPopup) {
+ console.log("Game Over!");
+ }
+});
+We need to track the gameOverPopup field and inform the user. Please, don’t worry about console.log. We will deal with it in the next episode.
+I hope this episode was exciting despite its size ;) Feel free to read and run the related solution.
+Are you wondering about situations where you have already decided on the actor’s colors and want to move the actor immediately to the bottom of the board? What about controls like "Start," "Stop," and "Pause"? What about the score and the speed of the actor’s movement?
+I’m going to reveal these points in the next episode. Stay in touch!
+Congratulations! We are in the final stretch! Let’s summarize our previous points.
+In Episode 01, I introduced you to the Columns Game history and concept.
+Episode 02 is devoted to initial technical points like Bootstrapping, Libraries, and gameplay’s first steps.
+Episode 03 is the most complicated and exciting. In this episode, we passed all the mandatory steps to get the natural Qwik Columns gameplay.
+Despite the above, our current solution is still incomplete; the features below are missing.
+A player can set the game on pause.
+Move the actor somehow immediately to the bottom of the board. This feature is essential because, in many cases, a player has decided on the final actor’s color combination, and the player wants to drop it to the bottom immediately. Moreover, the dropping above is preferable to be animated.
+Provide visual controls "Start," "Stop," and "Pause" as a set of buttons.
+Provide different customized speeds for the moving actor, like "Slow," "Normal," and "Fast," with related score calculation. Slow speed gives us less score; Fast pace gives us more.
+Visual end of the game. Currently, we have only "console.log" when the game has finished.
+A UI footer, preferably responsive. +Traditionally, I’ll explain all my modifications step by step. My impatient readers can read and run the final working solution.
+If you want to trace future steps with me, please use the destination code from the previous episode.
+Let’s get started!
+First, let’s deal with src/components/game-play/game-logic.ts. Please, read my comments in the code!
+Pay attention to the new Phase
+export enum Phase {
+ INACTIVE,
+ // Pause is ON
+ PAUSED,
+ MOVING,
+ // Drop is requested
+ DROP,
+ // Drop action is under progress
+ FLYING,
+ MATCH_REQUEST,
+ COLLAPSE_REQUEST,
+}
+and change the main game definition.
+export interface Game {
+ board: ColumnsColor[][];
+ actor: Actor;
+ phase: Phase;
+ // we need to save our current phase before pause
+ savedPhase: Phase;
+ nextActor: ColumnsColor[];
+ score: number;
+  // this is a key/value-based map of score descriptors
+ // key describes type of speed, Slow, Normal, Fast
+ // value is a related score extent
+ scores: { [key: string]: number };
+}
+Pass current speed (Level) to matching function. It’s needed for score calculation.
+export function matching(
+ game: Game,
+ // new parameter
+ level: Level,
+ mark: boolean,
+ customBoard?: ColumnsColor[][]
+) {
+ // ...
+ function checkCollapsed(match: boolean[][], mark: boolean): boolean {
+ let result = false;
+ for (let row = 0; row < rowsQty; row++) {
+ for (let col = 0; col < columnsQty; col++) {
+ if (match[row][col]) {
+ if (mark) {
+ board[row][col] = colorsToDisappearHash[board[row][col]];
+ // calculate the score according to the level
+ game.score += game.scores[level];
+ }
+ result = true;
+ }
+ }
+ }
+ return result;
+ }
+
+ // ...
+ return checkCollapsed(match, mark);
+}
+isFinish should also know about the level
+export function isFinish(game: Game, level: Level): boolean {
+ // ...
+ if (matching(game, level, false, getNextBoard())) {
+ return false;
+ }
+ // ...
+}
+Add pause implementation.
+export function pause(game: Game) {
+ if (game.phase === Phase.PAUSED) {
+ game.phase = game.savedPhase;
+ } else {
+ game.savedPhase = game.phase;
+ game.phase = Phase.PAUSED;
+ }
+}
+That’s it regarding the gameplay definitions. It’s time to focus on src/components/game-play/game.tsx
+There are the expected definitions.
+// Levels definitions
+export enum Level {
+ SLOW = "SLOW",
+ NORMAL = "NORMAL",
+ FAST = "FAST",
+}
+// Connects levels with the interval speeds in milliseconds
+export const SPEEDS = {
+ [Level.SLOW]: 1000,
+ [Level.NORMAL]: 500,
+ [Level.FAST]: 200,
+};
+// Connects levels with the scores
+export const SCORES = {
+ [Level.SLOW]: 1,
+ [Level.NORMAL]: 2,
+ [Level.FAST]: 3,
+};
+Please, look at the following fragment of code. We need to draw the actor also if the phase is PAUSED and DROP.
+There are the following changes in the store
+export interface MainStore {
+ width: number;
+ height: number;
+ game: Game;
+ blockSize: number;
+ // Add the level
+ level: Level;
+ // We need to control intervalId (start, stop).
+ // It's important in the context of Controls (see above).
+ intervalId: any | null;
+ gameOverPopup: boolean;
+}
+and related changed for the initial state.
+const store = useStore<MainStore>({
+ width: 0,
+ height: 0,
+ game: {
+ board: [...initData],
+ actor: {
+ state: [...initActor],
+ column: Math.floor(initData[0].length / 2),
+ row: -2,
+ },
+    // Start from INACTIVE instead of MOVING.
+ // Now the game is inactive and the user should press Play button.
+ phase: Phase.INACTIVE,
+ // add this one...
+ savedPhase: Phase.INACTIVE,
+ nextActor: randomColors(3),
+ score: 0,
+ scores: SCORES,
+ },
+ blockSize: 0,
+ // initial level will be NORMAL
+ level: Level.NORMAL,
+ // add this one...
+ intervalId: null,
+ gameOverPopup: false,
+});
+At this point, I’d like to pause my story and share one tricky Qwik feature with you. The following information has been taken from the official Qwik documentation.
+According to Qwik Deep Objects
+export const MyComp = component$(() => {
+ const store = useStore({
+ person: { first: null, last: null },
+ location: null,
+ });
+
+ store.location = { street: "main st" };
+
+ return (
+ <section>
+ <p>
+ {store.person.last}, {store.person.first}
+ </p>
+ <p>{store.location.street}</p>
+ </section>
+ );
+});
+In the above examples, Qwik will automatically wrap child objects person and location into a proxy and correctly create subscriptions on all deep properties.
+The wrapping behavior described above has one surprising side-effect. Writing and reading from a proxy auto wraps the object, which means that the identity of the object changes. This should normally not be an issue, but it is something that the developer should keep in mind.
+export const MyComp = component$(() => {
+ const store = useStore({ person: null });
+ const person = { first: "John", last: "Smith" };
+ store.person = person; // store.person auto wraps object into proxy
+
+ if (store.person !== person) {
+ // The consequence of auto wrapping is that the object identity changes.
+ console.log("store auto-wrapped person into a proxy");
+ }
+});
+Let’s continue and move all core logic to a separate moveTick function. Please, read my comments there. Uncommented logic is the same as in the previous episode.
+// Create a separate function
+const moveTick = $(() => {
+ const game = store.game;
+
+  // Do nothing while the actor is dropping; I'll explain it below.
+ if (game.phase === Phase.FLYING) {
+ return;
+ }
+
+ if (game.phase === Phase.MOVING) {
+ if (isNextMovePossible(game)) {
+ actorDown(game);
+ } else {
+ endActorSession(game);
+ // Pass the level.
+ if (isFinish(game, store.level)) {
+ game.phase = Phase.INACTIVE;
+ store.gameOverPopup = true;
+ } else {
+ game.phase = Phase.MATCH_REQUEST;
+ }
+ }
+ // If the current phase is DROP.
+ } else if (game.phase === Phase.DROP) {
+ // We actually don't need to change the current state of the game
+ // that's why we create a clone of the game
+ const gameClone = clone(game);
+
+ // Calculate how many steps should be passed to reach the bottom.
+ let steps = 0;
+ // Iterate until the next move is possible.
+ while (isNextMovePossible(gameClone)) {
+ // Move the actor (on the cloned game) one step down.
+ actorDown(gameClone);
+ // Increase the steps counter
+ steps++;
+ }
+ // Pass the related steps to the "render" function.
+ // It causes the DROP animation running. I'll explain it a bit later.
+ reRender(steps);
+ return;
+ } else if (game.phase === Phase.MATCH_REQUEST) {
+ // Pass the level
+ const matched = matching(game, store.level, true);
+ if (matched) {
+ game.phase = Phase.COLLAPSE_REQUEST;
+ } else {
+ doNextActor(game);
+ game.phase = Phase.MOVING;
+ }
+ } else if (game.phase === Phase.COLLAPSE_REQUEST) {
+ collapse(game);
+ game.phase = Phase.MATCH_REQUEST;
+ }
+
+ reRender();
+});
+Add doDrop function
+ const doDrop = $(() => {
+ if (store.game.phase === Phase.MOVING) {
+ store.game.phase = Phase.DROP;
+ }
+ });
+and add the related keys binding
+ useOnDocument(
+ "keypress",
+ $((event) => {
+ const keyEvent = event as KeyboardEvent;
+ const { phase } = store.game;
+ if (phase !== Phase.MOVING) {
+ return;
+ }
+ if (keyEvent.code === "KeyA") {
+ // ...
+ } else if (keyEvent.code === "KeyS" || keyEvent.code === "Space") {
+ doDrop();
+ }
+ // ...
+ })
+ );
+Attention! The part below is the most tricky here. Please, read my comments in the code carefully!
+// Just added steps as an optional parameter.
+const reRender = $((steps?: number) => {
+ render(store.game, svgRef, store.width, store.height, store.blockSize, steps);
+});
+export function render(
+ game: Game,
+ svgRef: Signal<Element | undefined>,
+ width: number,
+ height: number,
+ blockSize: number,
+ // New parameter
+ passThroughSteps?: number
+) {
+ // ...
+ // Also, render the actor if the current phase is PAUSED or DROP
+ if (
+ game.phase === Phase.MOVING ||
+ game.phase === Phase.PAUSED ||
+ game.phase === Phase.DROP
+ ) {
+ // ...
+ svg
+ .selectAll()
+ .data(actorData)
+ .enter()
+ .append("g")
+ .append("rect")
+ // All shapes related to the actor should have "could-fly" class.
+ // This class is a fake one and we use it for future animation
+ .attr("class", "could-fly")
+ // ...
+ .attr("stroke-width", 1);
+
+ // We need to run "flying" process if passThroughSteps is existing
+ if (passThroughSteps) {
+ // Set the phase.
+ // Now "flying process" will be simultaneous with the current interval,
+ // but the current interval's handler will ignore any activity;
+ // see the code under the following comment: "if the current phase is DROP"
+ game.phase = Phase.FLYING;
+
+ // This is a good example of D3 animation.
+ svg
+ // We need to select all shapes includes "could-fly" fake class (the whole actor)
+ .selectAll(".could-fly")
+ // Run animation.
+ // Pay attention! This process is asynchronous!
+ .transition()
+ // with 700ms duration
+ .duration(700)
+ // This aim of animation is moving the current actor
+ // to the Y-axis destination that equals
+ // current actor's Y + passThroughSteps * blockSize.
+ .attr("y", (d: any) => d.y + passThroughSteps * blockSize)
+ // don't mix with on('end', ...); it doesn't work in this case (D3 feature)
+ .end()
+ .then(() => {
+ // change the state of the game
+ // when asynchronous animation process has been finished
+ // move the actor down passThroughSteps for the current game
+ actorDown(game, passThroughSteps);
+ // let's move!
+ game.phase = Phase.MOVING;
+ });
+ }
+ }
+}
+Let’s briefly repeat the DROP animation concept.
+If the DROP action happens, we need to calculate how many steps the actor should fly (animation distance) to reach the bottom.
+Set the current phase to FLYING.
+Run animation when Y axis destination (vertical) of the actor equals current actor’s Y + passThroughSteps * blockSize
+Wait for the end of the animation and move the actor down passThroughSteps
+Set the phase back to MOVING +That’s it about DROP.
+useVisibleTask$ became much simpler because we moved all of the logic there to moveTick function above!
+useVisibleTask$(({ cleanup }: { cleanup: Function }) => {
+ setSvgDimension(containerRef, store);
+  // create the interval and save it in the store
+ // because we should be able to control this interval outside useVisibleTask$
+ // SPEEDS[store.level] describes the current game speed by the level
+ store.intervalId = setInterval(moveTick, SPEEDS[store.level]);
+ cleanup(() => clearInterval(store.intervalId));
+});
+Some notes regarding the end of the game.
+useTask$(({ track }: { track: Function }) => {
+ track(() => store.gameOverPopup);
+
+ // track gameOverPopup, if it fires then hide it (the related popup) after 5 seconds
+ if (store.gameOverPopup) {
+ setTimeout(() => {
+ store.gameOverPopup = false;
+ }, 5000);
+ }
+});
+Please, also, look at the related UI part of the code. The HTML block appears only if store.gameOverPopup is true. I guess, React guys should be familiar with this technique.
+{
+ store.gameOverPopup && (
+ <div class="fixed top-1/2 left-1/2 -translate-y-1/2 -translate-x-1/2 z-50 w-72 text-center max-w-sm p-6 bg-white text-white border border-gray-200 rounded-lg shadow dark:bg-gray-800 dark:border-gray-700 z-50">
+ GAME OVER
+ </div>
+ );
+}
+The time has finally come, and I’m glad to show how to use the Controls component. Don’t worry. I’ll focus on the component’s details a bit later.
+<Controls
+ game={store.game}
+ blockSize={15}
+ level={store.level}
+ onStart$={() => {
+ // start the game
+ init(store.game);
+ store.gameOverPopup = false;
+ store.game.phase = Phase.MOVING;
+ }}
+ onPause$={() => {
+ // pause the game
+ pause(store.game);
+ }}
+ onStop$={() => {
+ // stop the game
+ store.game.phase = Phase.INACTIVE;
+ store.gameOverPopup = true;
+ }}
+ // and also pass other activities
+ onLeft$={doLeft}
+ onRight$={doRight}
+ onSwap$={doSwap}
+ onDrop$={doDrop}
+ // including the level switching
+ onLevel$={(level: Level) => {
+ // update the level
+ store.level = level;
+ // clear the current interval if it's exists
+ if (store.intervalId !== null) {
+ clearInterval(store.intervalId);
+ }
+ // re-create the interval
+ store.intervalId = setInterval(moveTick, SPEEDS[store.level]);
+ }}
+/>
+Here is Controls component.
+import type { PropFunction, Signal } from "@builder.io/qwik";
+import { useSignal } from "@builder.io/qwik";
+import { component$ } from "@builder.io/qwik";
+import * as d3 from "d3";
+import type { Game } from "./game-logic";
+import { Phase } from "./game-logic";
+import { Level } from "./game";
+
+interface ControlsProps {
+ game: Game;
+ onStart$: PropFunction<() => void>;
+ onPause$: PropFunction<() => void>;
+ onStop$: PropFunction<() => void>;
+ onLeft$: PropFunction<() => void>;
+ onRight$: PropFunction<() => void>;
+ onSwap$: PropFunction<() => void>;
+ onDrop$: PropFunction<() => void>;
+ onLevel$: PropFunction<(level: Level) => void>;
+ blockSize: number;
+ level: Level;
+}
+
+// This function is responsible for the "next actor" rendering
+export function renderNextActor(
+ data: string[],
+ size: number,
+ svgRef: Signal<Element | undefined>
+) {
+ // this logic is similar to the logic from
+ // https://github.com/buchslava/qwik-columns/blob/final-devto-edition/src/components/game-play/game.tsx#L56
+ if (!svgRef.value) {
+ return;
+ }
+ const svg = d3.select(svgRef.value);
+
+ svg.selectAll("*").remove();
+
+ svg
+ .append("svg")
+ .attr("width", size)
+ .attr("height", size * data.length)
+ .append("g")
+ .attr("transform", "translate(0,0)");
+
+ const displayData = data.map((d, i) => ({
+ value: d,
+ y: i * size,
+ size,
+ }));
+
+ svg
+ .selectAll()
+ .data(displayData)
+ .enter()
+ .append("g")
+ .append("rect")
+ .attr("x", 0)
+ .attr("width", (d) => d.size)
+ .attr("y", (d) => d.y)
+ .attr("height", (d) => d.size)
+ // @ts-ignore
+ .attr("fill", (d) => d3.color(d.value))
+ .attr("stroke", "#000000")
+ .attr("stroke-width", 1);
+}
+
+export default component$<ControlsProps>(
+ ({
+ onStart$,
+ onPause$,
+ onStop$,
+ onLeft$,
+ onRight$,
+ onSwap$,
+ onDrop$,
+ onLevel$,
+ game,
+ blockSize,
+ level,
+ }) => {
+ const svgRef = useSignal<Element>();
+
+ // run next actor rendering
+ renderNextActor(game.nextActor, blockSize, svgRef);
+
+ return (
+ <div class="relative text-white w-72 h-48">
+ <div class="pl-3 inset-x-0 top-0">
+ <div class="mb-5 text-base lg:text-2xl md:text-xl font-extrabold font-mono">
+ SCORE: {game.score}
+ </div>
+ <div class="mb-5">
+ <div class="bg-white w-32 pt-2 pb-2 flex justify-center">
+ // this is a SVG for the next actor rendering
+ <svg
+ width={blockSize}
+ height={blockSize * game.nextActor.length}
+ ref={svgRef}
+ />
+ </div>
+ </div>
+ // We need to show "Start" button only if the phase is INACTIVE
+ {game.phase === Phase.INACTIVE && (
+ <div class="mb-5">
+ <button
+ // use the passed (input property) function
+ onClick$={onStart$}
+ type="button"
+ class="font-mono px-8 py-3 w-32 text-white bg-pink-300 rounded focus:outline-none"
+ >
+ START
+ </button>
+ </div>
+ )}
+        // We need to show the "Stop" and "Pause" buttons only if the phase is NOT INACTIVE
+ {game.phase !== Phase.INACTIVE && (
+ <div class="mb-5">
+ <button
+ // use the passed (input property) function
+ onClick$={onPause$}
+ type="button"
+ class="font-mono px-8 py-3 w-32 text-white bg-blue-300 rounded focus:outline-none"
+ >
+ {game.phase === Phase.PAUSED ? "GO" : "PAUSE"}
+ </button>
+ </div>
+ )}
+ {game.phase !== Phase.INACTIVE && (
+ <div class="mb-5">
+ <button
+ // use the passed (input property) function
+ onClick$={onStop$}
+ type="button"
+ class="font-mono px-8 py-3 w-32 text-white bg-gray-300 rounded focus:outline-none"
+ >
+ STOP
+ </button>
+ </div>
+ )}
+ </div>
+ <div class="pl-3 mb-5 flex w-36 justify-between">
+ // 1-st Slow level
+ <button
+ onClick$={() => {
+ // pass SLOW to the function in the parent component
+ onLevel$(Level.SLOW);
+ }}
+ type="button"
+ // we use dynamic class with Tailwind-based classes to highlight the current level
+ class={[
+ "w-10 py-3 text-white rounded focus:outline-none",
+ level === Level.SLOW ? "bg-green-700" : "bg-yellow-500",
+ ]}
+ disabled={level === Level.SLOW}
+ >
+ 1
+ </button>
+ // 2-nd Normal level
+ <button
+ onClick$={() => {
+ // pass NORMAL to the function in the parent component
+ onLevel$(Level.NORMAL);
+ }}
+ type="button"
+ class={[
+ "w-10 py-3 text-white rounded focus:outline-none",
+ level === Level.NORMAL ? "bg-green-700" : "bg-yellow-500",
+ ]}
+ disabled={level === Level.NORMAL}
+ >
+ 2
+ </button>
+ // 3-rd Fast level
+ <button
+ onClick$={() => {
+ // pass FAST to the function in the parent component
+ onLevel$(Level.FAST);
+ }}
+ type="button"
+ class={[
+ "w-10 py-3 text-white rounded focus:outline-none",
+ level === Level.FAST ? "bg-green-700" : "bg-yellow-500",
+ ]}
+ disabled={level === Level.FAST}
+ >
+ 3
+ </button>
+ </div>
+ // Display other control buttons if the phase is NOT INACTIVE
+ {game.phase !== Phase.INACTIVE && (
+ <div class="pl-3 grid grid-rows-3 grid-cols-2 gap-4">
+ // Swap the actor colors (alternative "W" key)
+ <div class="col-span-2">
+ <button
+ onClick$={onSwap$}
+ type="button"
+ class="text-2xl py-3 w-32 text-white bg-gray-400 rounded focus:outline-none"
+ >
+ W
+ </button>
+ </div>
+ // Move the actor left (alternative "A" key)
+ <div class="w-32 grid grid-flow-col justify-stretch">
+ <button
+ onClick$={onLeft$}
+ type="button"
+ class="text-2xl mr-2 py-3 text-white bg-green-300 rounded focus:outline-none"
+ >
+ A
+ </button>
+ // Move the actor right (alternative "D" key)
+ <button
+ onClick$={onRight$}
+ type="button"
+ class="text-2xl ml-2 py-3 text-white bg-green-300 rounded focus:outline-none"
+ >
+ D
+ </button>
+ </div>
+          // Drop the actor (alternative "S" key)
+ <div class="col-span-2">
+ <button
+ onClick$={onDrop$}
+ type="button"
+ class="text-2xl py-3 w-32 text-white bg-gray-400 rounded focus:outline-none"
+ >
+ S
+ </button>
+ </div>
+ </div>
+ )}
+ </div>
+ );
+ }
+);
+Finally, let’s make the last stitch: we need to add the footer.
+return (
+ <div class="flex justify-center w-screen h-screen pt-5" ref={containerRef}>
+ {store.gameOverPopup && (
+ <div class="fixed top-1/2 left-1/2 -translate-y-1/2 -translate-x-1/2 z-50 w-72 text-center max-w-sm p-6 bg-white text-white border border-gray-200 rounded-lg shadow dark:bg-gray-800 dark:border-gray-700 z-50">
+ GAME OVER
+ </div>
+ )}
+ <div>
+ <svg
+ class="game-area"
+ width={store.width}
+ height={store.height}
+ ref={svgRef}
+ />
+ </div>
+ <Controls
+ // ...
+ />
+ <Footer />
+ </div>
+);
+Here is the component’s code.
+import { component$ } from "@builder.io/qwik";
+
+export default component$(() => {
+ // The year of copyright should be dynamic.
+ const year = new Date().getFullYear();
+
+ return (
+ <div class="fixed bottom-0 left-0 w-full h-8 text-center text-white bg-gray-600">
+ // see my comment below
+ <span class="hidden lg:inline">
+ <span class="text-red-500 font-bold font-mono text-xl pr-2">C</span>
+ <span class="text-yellow-500 font-bold font-mono text-xl pr-2">O</span>
+ <span class="text-green-500 font-bold font-mono text-xl pr-2">L</span>
+ <span class="text-blue-500 font-bold font-mono text-xl pr-2">U</span>
+ <span class="text-teal-500 font-bold font-mono text-xl pr-2">M</span>
+ <span class="text-fuchsia-500 font-bold font-mono text-xl pr-2">N</span>
+ <span class="text-lime-500 font-bold font-mono text-xl pr-7">S</span>
+ </span>
+ <span class="text-sm text-white">
+ <a
+ href="https://valor-software.com/"
+ class="no-underline hover:underline"
+ target="_blank"
+ >
+ Valor Software
+ </a>{" "}
+ edition. (C) {year},{" "}
+ <a
+ href="https://dev.to/buchslava"
+ class="no-underline hover:underline"
+ target="_blank"
+ >
+ Vyacheslav Chub
+ </a>
+ </span>
+ </div>
+ );
+});
+The Tailwind classes `hidden lg:inline` there mean that we show the "C O L U M N S" spans only on large screens. You can gain more knowledge about Tailwind Responsive Design here.
+git clone git@github.com:buchslava/qwik-columns.git +cd qwik-columns +git checkout final-devto-edition +npm ci +npm start
+Start the game. Switch the speed
+Core controls
+The end of the game
+The responsive footer
+Traditionally, I’d like to start the article with the following. A few months ago, I faced a situation where my customer asked for a multi-language feature on a NextJS-based solution. The issue is that providing all the localized content manually is impossible, given its vast volume and a limited budget.
+In other words, we have only one local version, say, English, and we need to translate it automatically to some others, say, Italian, Spanish, French, etc. But this isn’t over. The future language switcher should be friendly with the current UI and 100% under the developer’s control.
+I started thinking and found that only one approach was suitable. It doesn’t require additional settings on the Google Console side and allows us to translate to any language without pain.
+You can try the solution, by the way.
+However, the following problems still need to be solved.
+It's not a given that the solution above, as is, fits NextJS specifics. +The standard dropdown component looks too generic and is not customizable as the customer requested. +I don’t want to put the routine of my research process on your plate, but I will describe the final decision step by step. If you want to see my final solution now, please look at https://github.com/buchslava/nextjs-gtrans-demo.
+Let’s get started with the explanation!
+Create a new NextJS project.
+npx create-next-app@latest
+What is your project named? -> nextjs-gtrans-demo
+Would you like to use TypeScript? -> Yes
+Would you like to use ESLint? -> No
+Would you like to use Tailwind CSS? -> Yes
+Would you like to use `src/` directory? -> Yes
+Would you like to use App Router? -> No
+Would you like to customize the default import alias? -> No
+Also, install one extra dependency.
+npm i nookies --save
+Now we can run the app
+npm run dev
+It’s time to implement the solution into the app. Please don’t worry if you don’t find some expected components during placing the code. Future steps will resolve it.
+Let’s change content in src/pages/index.tsx
+import { LanguageSwitcher } from "./lang-switcher";
+
+export default function Home() {
+ return (
+ <div className="h-screen flex flex-col">
+ <header className="w-full pt-4">
+ <LanguageSwitcher />
+ </header>
+ <div className="flex flex-col flex-1 overflow-auto">
+ <hr className="h-px my-8 bg-gray-200 border-0 dark:bg-gray-700" />
+ <article>
+ <h2 className="mb-4 text-4xl font-extrabold leading-none tracking-tight text-gray-900 md:text-5xl lg:text-3xl dark:text-white">
+ What is Lorem Ipsum?
+ </h2>
+ <p className="mb-7">
+ Lorem Ipsum is simply dummy text of the printing and typesetting
+ industry....
+ </p>
+ </article>
+ // This is a part of the content. Please take the full version for the original solution!
+ </div>
+ <footer>
+ <p className="mt-3">
+ <a
+ href="https://www.lipsum.com/"
+ target="_blank"
+ className="inline-flex items-center justify-center px-5 py-3 text-base font-medium text-center text-white bg-blue-700 rounded-lg hover:bg-blue-800 focus:ring-4 focus:ring-blue-300 dark:focus:ring-blue-900"
+ >
+ Source
+ <svg
+ className="w-3.5 h-3.5 ml-2"
+ aria-hidden="true"
+ xmlns="http://www.w3.org/2000/svg"
+ fill="none"
+ viewBox="0 0 14 10"
+ >
+ <path
+ stroke="currentColor"
+ stroke-linecap="round"
+ stroke-linejoin="round"
+ stroke-width="2"
+ d="M1 5h12m0 0L9 1m4 4L9 9"
+ />
+ </svg>
+ </a>
+ </p>
+ </footer>
+ </div>
+ );
+}
+I recommend temporarily forgetting the LanguageSwitcher component and focusing on the content mentioned above. The file contains three logical parts.
+The header includes the language switcher component (will be described later)
+The central part includes four paragraphs regarding Lorem Ipsum explanation
+The footer contains a button as a link to the source of the content
+Let’s change src/pages/_document.tsx
+import { Html, Head, Main, NextScript } from "next/document";
+import Script from "next/script";
+
+export default function Document() {
+ return (
+ <Html>
+ <Head>
+ <Script
+ src="/assets/scripts/lang-config.js"
+ strategy="beforeInteractive"
+ />
+ <Script
+ src="/assets/scripts/translation.js"
+ strategy="beforeInteractive"
+ />
+ <Script
+ src="//translate.google.com/translate_a/element.js?cb=TranslateInit"
+ strategy="afterInteractive"
+ />
+ </Head>
+ <body>
+ <Main />
+ <NextScript />
+ </body>
+ </Html>
+ );
+}
+The main difference between this file and the default one is a set of three scripts below.
+public/assets/scripts/lang-config.js contains custom languages settings
+public/assets/scripts/translation.js contains TranslateInit callback function definition that will be used as a parameter to the main translation script
+//translate.google.com/translate_a/element.js?cb=TranslateInit - the main translation script by Google. Pay attention to cb=TranslateInit. The callback function must be passed here.
+It’s time to provide the code of the scripts mentioned above.
+window.__GOOGLE_TRANSLATION_CONFIG__ = {
+ languages: [
+ { title: "English", name: "en" },
+ { title: "Deutsch", name: "de" },
+ { title: "Español", name: "es" },
+ { title: "Français", name: "fr" },
+ ],
+ defaultLanguage: "en",
+};
+In this example, we declared four languages to use.
+public/assets/scripts/translation.js
+function TranslateInit() {
+ if (!window.__GOOGLE_TRANSLATION_CONFIG__) {
+ return;
+ }
+ new google.translate.TranslateElement({
+ pageLanguage: window.__GOOGLE_TRANSLATION_CONFIG__.defaultLanguage,
+ });
+}
+Here is a callback definition that includes a google.translate.TranslateElement call. If we don’t have the configuration, we pass nothing to Google’s script, i.e., an empty callback. Otherwise, we call google.translate.TranslateElement and pass the original content language.
+And it’s finally time to provide and explain the most critical part of the solution. I’m talking about the LanguageSwitcher mentioned before.
+src/components/lang-switcher.tsx
+Please, pay attention to the comments inside the code below.
+import { useEffect, useState } from "react";
+import { parseCookies, setCookie } from "nookies";
+
+// The following cookie name is important because it's Google-predefined for the translation engine purpose
+const COOKIE_NAME = "googtrans";
+
+// We should know a predefined nickname of a language and provide its title (the name for displaying)
+interface LanguageDescriptor {
+ name: string;
+ title: string;
+}
+
+// The following definition describes typings for JS-based declarations in public/assets/scripts/lang-config.js
+declare global {
+ namespace globalThis {
+ var __GOOGLE_TRANSLATION_CONFIG__: {
+ languages: LanguageDescriptor[];
+ defaultLanguage: string;
+ };
+ }
+}
+
+const LanguageSwitcher = () => {
+ const [currentLanguage, setCurrentLanguage] = useState<string>();
+ const [languageConfig, setLanguageConfig] = useState<any>();
+
+ // When the component has initialized, we must activate the translation engine the following way.
+ useEffect(() => {
+ // 1. Read the cookie
+ const cookies = parseCookies()
+ const existingLanguageCookieValue = cookies[COOKIE_NAME];
+
+ let languageValue;
+ if (existingLanguageCookieValue) {
+ // 2. If the cookie is defined, extract a language nickname from there.
+ const sp = existingLanguageCookieValue.split("/");
+ if (sp.length > 2) {
+ languageValue = sp[2];
+ }
+ }
+    // 3. If __GOOGLE_TRANSLATION_CONFIG__ is defined and we still haven't decided on languageValue, let's take the current language from the predefined defaultLanguage below.
+ if (global.__GOOGLE_TRANSLATION_CONFIG__ && !languageValue) {
+ languageValue = global.__GOOGLE_TRANSLATION_CONFIG__.defaultLanguage;
+ }
+ if (languageValue) {
+ // 4. Set the current language if we have a related decision.
+ setCurrentLanguage(languageValue);
+ }
+ // 5. Set the language config.
+ if (global.__GOOGLE_TRANSLATION_CONFIG__) {
+ setLanguageConfig(global.__GOOGLE_TRANSLATION_CONFIG__);
+ }
+ }, []);
+
+ // Don't display anything if current language information is unavailable.
+ if (!currentLanguage || !languageConfig) {
+ return null;
+ }
+
+ // The following function switches the current language
+ const switchLanguage = (lang: string) => () => {
+ // We just need to set the related cookie and reload the page
+ // "/auto/" prefix is Google's definition as far as a cookie name
+ setCookie(null, COOKIE_NAME, "/auto/" + lang)
+ window.location.reload();
+ };
+
+ return (
+ <div className="text-center notranslate">
+ {languageConfig.languages.map((ld: LanguageDescriptor, i: number) => (
+ <>
+ {currentLanguage === ld.name ||
+ (currentLanguage === "auto" &&
+ languageConfig.defaultLanguage === ld) ? (
+            <span key={`l_s_${ld.name}`} className="mx-3 text-orange-300">
+ {ld.title}
+ </span>
+ ) : (
+ <a
+              key={`l_s_${ld.name}`}
+ onClick={switchLanguage(ld.name)}
+ className="mx-3 text-blue-300 cursor-pointer hover:underline"
+ >
+ {ld.title}
+ </a>
+ )}
+ </>
+ ))}
+ </div>
+ );
+};
+
+export { LanguageSwitcher, COOKIE_NAME };
+Pay attention to notranslate class in the root div before. This is also Google’s definition. It means that all of the content inside should not be translated. It’s crucial because language titles should stay untouched, i.e., as they are.
+It’s time to gather all the information above and explain how the solution works.
+The starting point is placed in src/pages/_document.tsx
+import { Html, Head, Main, NextScript } from "next/document";
+import Script from "next/script";
+
+export default function Document() {
+ return (
+ <Html>
+ <Head>
+ <Script
+ src="/assets/scripts/lang-config.js"
+ strategy="beforeInteractive"
+ />
+ <Script
+ src="/assets/scripts/translation.js"
+ strategy="beforeInteractive"
+ />
+ <Script
+ src="//translate.google.com/translate_a/element.js?cb=TranslateInit"
+ strategy="afterInteractive"
+ />
+ </Head>
+ <body>
+ <Main />
+ <NextScript />
+ </body>
+ </Html>
+ );
+}
+The first one contains language configuration
+The second one contains a callback with the translation logic runner
+Standard Google’s script gets the callback described before and runs it.
+Pay attention to the following facts.
+We use Script tag from next/script because of NextJS ;)
+We use strategy="beforeInteractive" for a couple of first scripts
+We use strategy="afterInteractive" for the last one
+It’s important. More information regarding the above you can find here. Let me provide you some related theory.
+beforeInteractive: Load the script before any Next.js code and before any page hydration occurs.
+afterInteractive: (default) Load the script early but after some hydration on the page occurs.
+It’s very easy. When the user presses a new language link, say, for the Spanish language, the switchLanguage function described above sets the /auto/es value for the googtrans cookie. This is a message to the translation engine that Spanish-translated content is expected. After that switchLanguage reloads the page, and we will see the Spanish content. Google Translate did this job!
+That’s it regarding the main flow. But let me focus on some additional important stuff.
+Let’s run the solution
+npm run dev
+and switch the language, say, Deutsch. However, the issue is that the standard Google Translate bar is still on top.
+We definitely ought to fix it. Let’s add a couple of the following changes to src/styles/globals.css
+Much better now!
+One of the tastiest features of NextJS is Static Site Generation (SSG). Let’s test SSG on this solution.
+We need to add ssg script into package.json
+ "scripts": {
+ "dev": "next dev",
+ "build": "next build",
+ "ssg": "next build && next export",
+ "start": "next start",
+ "lint": "next lint"
+ },
+Let’s build a static version.
+npm run ssg
+
+> nextjs-gtrans-demo@0.1.0 ssg
+> next build && next export
+
+ ✓ Linting and checking validity of types
+ ✓ Creating an optimized production build
+ ✓ Compiled successfully
+ ✓ Collecting page data
+ ✓ Generating static pages (3/3)
+ ✓ Finalizing page optimization
+
+Route (pages) Size First Load JS
+┌ ○ / 4.24 kB 82.1 kB
+├ /_app 0 B 77.9 kB
+├ ○ /404 181 B 78 kB
+└ λ /api/hello 0 B 77.9 kB
++ First Load JS shared by all 80.1 kB
+ ├ chunks/framework-66d32731bdd20e83.js 45.2 kB
+ ├ chunks/main-12e9c77dbbe57e7c.js 31.5 kB
+ ├ chunks/pages/_app-3cfebadf4e2e7ae1.js 298 B
+ ├ chunks/webpack-5c046346608af636.js 807 B
+ └ css/24fee595fee43abd.css 2.29 kB
+
+λ (Server) server-side renders at runtime (uses getInitialProps or getServerSideProps)
+○ (Static) automatically rendered as static HTML (uses no initial props)
+...........
+ Copying "public" directory
+ ✓ Exporting (3/3)
+Export successful. Files written to /Users/slava/Desktop/projects11/nextjs-gtrans-demo/out
+You can find the static version in out folder.
+Let’s test it. If you don’t have http-server installed, please install it.
+npm i -g http-server
+cd ./out
+http-server
+The final solution is here.
+Building user interfaces declaratively is something the Web community has widely adopted, and nowadays, large applications are built following these principles. For example, Google launched Jetpack Compose, and Apple announced SwiftUI at WWDC19, receiving an immensely positive response from developers.
+Here at Valor Software, we are always excited about new advancements in development technologies, and we are fans of NativeScript. We collaborated with nStudio to provide an effective and enjoyable Jetpack Compose integration for Android apps driven by NativeScript.
+Earlier this month we announced SwiftUI for NativeScript, which follows the same principles and API design as Jetpack Compose for NativeScript.
+In this article, we’ll demonstrate how to use Jetpack Compose within NativeScript to explore fun new possibilities in building amazing UIs together.
+We can create an app using a standard TypeScript template:
+ns create jetpackcompose --ts
+cd jetpackcompose
+This will set up what is often called a "vanilla" flavored NativeScript app. You can use whichever flavor you’re most comfortable with, though. Setting the plugin up for Angular (and most other flavors) is usually a case of registering the view, which we’ll demonstrate in a section below.
+npm install @nativescript/jetpack-compose
+
+ Note
+ |
+
+Jetpack Compose requires you to use at least API 21 (Lollipop) as your minimum SDK version. You can do this by adding minSdkVersion 21 to your app.gradle.
+ |
+
If you plan to build your libraries directly from Android Studio, you don’t need anything else, just drop your built .aar
in App_Resources/Android/libs/
and skip to the next section. But if you’re planning on writing Kotlin code directly in .kt
files in App_Resources/Android/src/main/java
, then we need some extra steps.
First, add your compose dependencies in app.gradle
:
dependencies {
+ def compose_version = "1.2.1"
+ implementation "androidx.compose.ui:ui:$compose_version"
+ // Tooling support (Previews, etc.)
+ implementation "androidx.compose.ui:ui-tooling:$compose_version"
+
+ // Add any other dependencies your Jetpack Compose UI needs
+ // like material design:
+ // implementation 'androidx.compose.material:material:$compose_version'
+}
+Then modify the android
section so you enable compose:
android {
+ // other settings like targetSdk, etc.
+
+ buildFeatures {
+ compose true
+ }
+ compileOptions {
+ sourceCompatibility JavaVersion.VERSION_1_8
+ targetCompatibility JavaVersion.VERSION_1_8
+ }
+ kotlinOptions {
+ jvmTarget = "1.8"
+ }
+ composeOptions {
+ kotlinCompilerExtensionVersion '1.3.2'
+ }
+}
+And finally, enable Kotlin by creating the file App_Resources/Android/gradle.properties
useKotlin=true
+kotlinVersion=1.7.20 # you can choose your kotlin version here
+Create your Jetpack Compose views and wrapper
+Create App_Resources/Android/src/main/java/BasicView.kt
:
package com.example
+
+import android.content.Context
+import androidx.compose.material.MaterialTheme
+import androidx.compose.material.Text
+import androidx.compose.runtime.Composable
+import androidx.compose.runtime.getValue
+import androidx.compose.runtime.mutableStateOf
+import androidx.compose.runtime.setValue
+import androidx.compose.ui.platform.ComposeView
+import androidx.lifecycle.ViewModel
+import androidx.lifecycle.viewmodel.compose.viewModel
+
+class BasicView {
+ fun generateComposeView(view: ComposeView): ComposeView {
+ return view.apply {
+ setContent {
+ MaterialTheme {
+ Text("Hello from Jetpack Compose")
+ }
+ }
+ }
+ }
+
+ fun updateData(value: Map<Any, Any>) {
+ }
+ var onEvent: ((String) -> Unit)? = null
+
+}
+To use the default plugin handling of Compose views, it’s important that your implementation follows the following interface:
+class Example {
+ fun generateComposeView(view: ComposeView): ComposeView {
+ // render your compose views into the ComposeView
+ }
+
+ fun updateData(value: Map<Any, Any>) {
+ // this function receives data from NativeScript
+ // value is a js object converted to a map
+ }
+
+ // this is the event you will send back to Jetpack Compose
+ // when you need to pass data, just call onEvent?.invoke(v)
+ var onEvent: ((Any) -> Unit)? = null
+
+}
+Register your Jetpack Compose via the composeId
This can be done in the NativeScript app’s bootstrap file (often app.ts
or main.ts
).
import { registerJetpackCompose, ComposeDataDriver } from '@nativescript/jetpack-compose';
+
+// A. You can generate types for your own Compose Provider with 'ns typings android --aar {path/to/{name}.aar}'
+// B. Otherwise you can ignore by declaring the package resolution path you know you provided
+declare var com;
+registerJetpackCompose('sampleView', (view) => new ComposeDataDriver(new com.example.BasicView(), view));
+Additionally, if you want to use Angular, you can register the compose view itself:
+import { registerElement } from '@nativescript/angular';
+import { JetpackCompose } from '@nativescript/jetpack-compose';
+
+registerElement('JetpackCompose', () => JetpackCompose)
+Insert into any NativeScript layout
+app/main-page.xml
<Page
+ xmlns="http://schemas.nativescript.org/tns.xsd"
+ xmlns:jc="@nativescript/jetpack-compose"
+ class="page">
+ <StackLayout>
+ <jc:JetpackCompose composeId="sampleView" height="100" />
+ </StackLayout>
+</Page>
+You can now run the app with ns debug android
.
After running the app once you can open the platforms/android
folder in Android Studio where you’ll be able to find the BasicView.kt
file. From there you can start modifying it and previewing your changes (by adding the @Preview
decorator on the @Composable
you want to preview).
+ Important
+ |
++Saving this file will not change the BasicView.kt that lives inside your App_Resources, so be VERY careful to copy the file contents back once you’re done editing it! This will become a DX improvement in the future. + | +
Alternatively, you can create a new Android library and develop all your Jetpack Compose views there.
+First, let’s add some bindings to our BasicView so it now receives data in updateData
and displays that, as well as output an event once the data is updated:
package com.example
+
+import android.content.Context
+import androidx.compose.material.MaterialTheme
+import androidx.compose.material.Text
+import androidx.compose.runtime.Composable
+import androidx.compose.runtime.getValue
+import androidx.compose.runtime.mutableStateOf
+import androidx.compose.runtime.setValue
+import androidx.compose.ui.platform.ComposeView
+import androidx.lifecycle.ViewModel
+import androidx.lifecycle.viewmodel.compose.viewModel
+
+class BasicView {
+ data class ExampleUiState(
+ val text: String = ""
+ ) {}
+ class ExampleViewModel(
+ ) : ViewModel() {
+
+ var uiState by mutableStateOf(ExampleUiState())
+ }
+
+ var mViewModel = ExampleViewModel()
+ fun generateComposeView(view: ComposeView): ComposeView {
+
+ return view.apply {
+ setContent {
+ MaterialTheme {
+
+ val uiState = mViewModel.uiState;
+ // In Compose world
+ Text(uiState.text)
+ }
+ }
+ }
+ }
+
+ fun updateData(value: Map<Any, Any>) {
+ val v = value["data"] as String;
+ onEvent?.invoke(v)
+ mViewModel.uiState = ExampleUiState(v);
+ }
+
+ var onEvent: ((String) -> Unit)? = null
+
+}
+app/main-page.xml:
<Page xmlns="http://schemas.nativescript.org/tns.xsd" navigatingTo="navigatingTo" class="page"
+ xmlns:jc="@nativescript/jetpack-compose">
+ <StackLayout>
+ <Label text="The following view is Jetpack Compose inside NativeScript!" textWrap="true"></Label>
+    <jc:JetpackCompose composeEvent="{{ onEvent }}" data="{{ text }}" composeId="sampleView"></jc:JetpackCompose>
+ <Label text="This is NativeScript again"></Label>
+ <TextView textChange="{{ onTextChange }}" text="{{ text }}" textWrap="true"></TextView>
+ </StackLayout>
+</Page>
+app/main-page.ts:
import { Observable } from '@nativescript/core';
+import { registerJetpackCompose, ComposeDataDriver } from '@nativescript/jetpack-compose';
+import { EventData, Page, PropertyChangeData } from '@nativescript/core';
+
+// A. You can generate types for your own Compose Provider with 'ns typings android --aar {path/to/{name}.aar}'
+// B. Otherwise you can ignore by declaring the package resolution path you know you provided
+declare var com;
+registerJetpackCompose('sampleView', (view) => new ComposeDataDriver(new com.example.BasicView(), view));
+
+export function navigatingTo(args: EventData) {
+ const page = <Page>args.object;
+ page.bindingContext = new DemoModel();
+}
+
+export class DemoModel extends Observable {
+ text = '';
+
+ onEvent(evt: JetpackComposeEventData<string>) {
+ console.log('onEvent', evt.data);
+ }
+
+ onTextChange(evt: PropertyChangeData) {
+ console.log('textChange', evt.value);
+ this.set('text', evt.value);
+ }
+}
+Now every time you change the text on the NativeScript TextView
it’ll update the text on the Jetpack Compose view!
Here’s another example where I use a ColorPicker to change a NativeScript view’s background color:
+app.gradle
implementation "com.github.skydoves:colorpicker-compose:1.0.0"
+package com.example
+
+import android.content.Context
+import androidx.compose.foundation.layout.fillMaxSize
+import androidx.compose.foundation.layout.fillMaxWidth
+import androidx.compose.foundation.layout.height
+import androidx.compose.foundation.layout.padding
+import androidx.compose.material.MaterialTheme
+import androidx.compose.material.Text
+import androidx.compose.runtime.Composable
+import androidx.compose.runtime.getValue
+import androidx.compose.runtime.mutableStateOf
+import androidx.compose.runtime.setValue
+import androidx.compose.ui.Modifier
+import androidx.compose.ui.graphics.Color
+import androidx.compose.ui.graphics.ImageBitmap
+import androidx.compose.ui.platform.ComposeView
+import androidx.compose.ui.res.imageResource
+import androidx.compose.ui.unit.dp
+import androidx.lifecycle.ViewModel
+import androidx.lifecycle.viewmodel.compose.viewModel
+import com.github.skydoves.colorpicker.compose.ColorEnvelope
+import com.github.skydoves.colorpicker.compose.HsvColorPicker
+import com.github.skydoves.colorpicker.compose.ImageColorPicker
+import com.github.skydoves.colorpicker.compose.rememberColorPickerController
+
+class ColorPickerCompose {
+ fun generateComposeView(view: ComposeView): ComposeView {
+ return view.apply {
+ setContent {
+ val controller = rememberColorPickerController()
+ HsvColorPicker(
+ modifier = Modifier
+ .fillMaxWidth()
+ .height(450.dp)
+ .padding(10.dp),
+ controller = controller,
+ onColorChanged = { colorEnvelope: ColorEnvelope ->
+ onEvent?.invoke(colorEnvelope.hexCode)
+ }
+ )
+ }
+ }
+ }
+
+ fun updateData(value: Map<Any, Any>) {}
+
+ var onEvent: ((String) -> Unit)? = null
+
+}
+<StackLayout backgroundColor="{{ backgroundColor }}">
+ <Label text="The following view is Jetpack Compose inside NativeScript!" textWrap="true"></Label>
+ <StackLayout backgroundColor="lightblue">
+    <jc:JetpackCompose composeEvent="{{ onEvent }}" data="{{ text }}" composeId="jetpackCompose"></jc:JetpackCompose>
+ </StackLayout>
+ <Label text="This is NativeScript again"></Label>
+ <TextView text="{{ backgroundColor }}" textWrap="true"></TextView>
+</StackLayout>
+Working with Jetpack Compose in NativeScript is very transparent and easy. We look forward to seeing what the community will build with yet another powerful tool in NativeScript’s belt!
+Building user interfaces declaratively is something the Web community has widely adopted, and nowadays, large applications are built following these principles. For example, Google launched Jetpack Compose, and Apple announced SwiftUI at WWDC19, receiving an immensely positive response from developers.
+Here at Valor Software, we are always excited about new advancements in development technologies, and we are fans of NativeScript. We collaborated with nStudio to provide an effective and enjoyable SwiftUI integration for iOS apps driven by NativeScript.
+In this article, we’ll demonstrate how to use SwiftUI within NativeScript to explore fun new possibilities in building amazing UIs together.
+macOS Catalina or higher
+Xcode 11 or higher
+iOS device/simulator running iOS 13 or higher
+Modern iOS development is primarily done using the Swift programming language. SwiftUI uses a declarative syntax—you state what your user interface should do.
+I recommend taking the official SwiftUI tour, to get familiar with the basic concepts
+We can create an app using a standard TypeScript template:
+ns create swiftui --ts
+cd swiftui
+This will set up what is often called a "vanilla" flavored NativeScript app. In other words, it provides basic data binding capabilities and a rather simple setup. However, what we will cover here applies to any flavor (Angular, React, Svelte, Vue, etc.). You can explore more via StackBlitz from the following:
+JavaScript starter
+TypeScript starter
+Angular starter
+React starter
+Svelte starter
+Vue starter
+Install the SwiftUI plugin:
+npm install @nativescript/swift-ui
+
+ Note
+ |
++Your minimum iOS deployment target should be at least 13. + | +
You can add this line to App_Resources/iOS/build.xcconfig
:
IPHONEOS_DEPLOYMENT_TARGET = 13.0;
+Create your SwiftUI
+Create App_Resources/iOS/src/SampleView.swift:
+import SwiftUI
+
+struct SampleView: View {
+
+ var body: some View {
+ VStack {
+ Text("Hello World")
+ .padding()
+ }
+ }
+}
+Create your SwiftUI Provider
+This will prepare your SwiftUI for two-way data bindings to NativeScript.
+Create App_Resources/iOS/src/SampleViewProvider.swift
:
import SwiftUI
+
+@objc
+class SampleViewProvider: UIViewController, SwiftUIProvider {
+
+ // MARK: INIT
+
+ required init?(coder aDecoder: NSCoder) {
+ super.init(coder: aDecoder)
+ }
+
+ required public init() {
+ super.init(nibName: nil, bundle: nil)
+ }
+
+ public override func viewDidLoad() {
+ super.viewDidLoad()
+ setupSwiftUIView(content: swiftUIView)
+ }
+
+ // MARK: PRIVATE
+
+ private var swiftUIView = SampleView()
+
+ /// Receive data from NativeScript
+ func updateData(data: NSDictionary) {
+ // can be empty
+ }
+
+ /// Allow sending of data to NativeScript
+ var onEvent: ((NSDictionary) -> ())?
+}
+Insert into any NativeScript layout
+app/main-page.xml
<Page
+xmlns="http://schemas.nativescript.org/tns.xsd"
+xmlns:sw="@nativescript/swift-ui"
+class="page"
+>
+<StackLayout>
+<sw:SwiftUI swiftId="sampleView" height="100" />
+</StackLayout>
+</Page>
+Register your SwiftUI via the swiftId
+This can be done in the NativeScript app’s bootstrap file (often app.ts or main.ts).
+app.ts
import {
+registerSwiftUI,
+UIDataDriver
+} from "@nativescript/swift-ui";
+
+// A. You can generate types for your own Swift Provider with 'ns typings ios'
+// B. Otherwise you can ignore by declaring the class name you know you provided
+declare const SampleViewProvider: any;
+
+registerSwiftUI("sampleView", (view) =>
+new UIDataDriver(SampleViewProvider.alloc().init(), view)
+);
+You can now run the app with ns debug ios.
+After running the project once, you can open it in Xcode to further develop your SwiftUI using all the convenient aid of Xcode intellisense.
+For example from the root of your project:
+open platforms/ios/swiftui.xcworkspace
+You will find your .swift code underneath the TNSNativeSource folder as seen here…
+Let’s dive deeper by hooking up data bindings + events between SwiftUI and NativeScript.
+This can be any SwiftUI you would like to use in NativeScript.
+Create App_Resources/iOS/src/SampleView.swift
:
import SwiftUI
+
+class ButtonProps: ObservableObject {
+ @Published var count: Int = 0
+ var incrementCount: (() -> Void)?
+}
+
+struct SampleView: View {
+
+ @ObservedObject var props = ButtonProps()
+
+ var body: some View {
+ VStack(alignment: .center, spacing: 0) {
+ HStack(alignment:.center) {
+ Text("Count \(props.count)")
+ .padding()
+ .scaledToFill()
+ .minimumScaleFactor(0.5)
+ }
+ HStack(alignment: .center) {
+ Button(action: {
+ self.props.incrementCount?()
+ }) {
+ Image(systemName: "plus.circle.fill")
+ .foregroundColor(.white)
+ .padding()
+ .background(LinearGradient(
+ gradient: Gradient(
+ colors: [Color.purple, Color.pink]), startPoint: .top, endPoint: .bottom
+ ))
+ .clipShape(Circle())
+ }
+ }
+ }
+ .padding()
+ .clipShape(Circle())
+ }
+}
+Create App_Resources/iOS/src/SampleViewProvider.swift
:
import SwiftUI
+
+@objc
+class SampleViewProvider: UIViewController, SwiftUIProvider {
+
+ // MARK: INIT
+
+ required init?(coder aDecoder: NSCoder) {
+ super.init(coder: aDecoder)
+ }
+
+ required public init() {
+ super.init(nibName: nil, bundle: nil)
+ }
+
+ public override func viewDidLoad() {
+ super.viewDidLoad()
+ setupSwiftUIView(content: swiftUIView)
+ registerObservers()
+ }
+
+ // MARK: PRIVATE
+
+ private var swiftUIView = SampleView()
+
+ private func registerObservers() {
+ swiftUIView.props.incrementCount = {
+ let count = self.swiftUIView.props.count + 1
+ // update swiftUI view
+ self.swiftUIView.props.count = count
+ // notify nativescript
+ self.onEvent?(["count": count])
+ }
+ }
+
+ // MARK: API
+
+ /// Receive data from NativeScript
+ func updateData(data: NSDictionary) {
+ if let count = data.value(forKey: "count") as? Int {
+ // update swiftUI view
+ swiftUIView.props.count = count
+ // notify nativescript
+ self.onEvent?(["count": count])
+ }
+ }
+
+ /// Send data to NativeScript
+ var onEvent: ((NSDictionary) -> Void)?
+}
+app/main-page.xml
:
<Page
+ xmlns="http://schemas.nativescript.org/tns.xsd"
+ xmlns:sw="@nativescript/swift-ui"
+ navigatingTo="navigatingTo"
+>
+ <StackLayout>
+ <sw:SwiftUI swiftId="sampleView" data="{{ nativeCount }}" swiftUIEvent="{{ onEvent }}" loaded="{{ loadedSwiftUI }}" />
+ <Label text="{{ 'NativeScript Label: ' + nativeCount.count }}" class="h2" />
+ <Button text="NativeScript data bindings: Decrement" tap="{{ updateNativeScriptData }}" class="btn btn-primary" />
+ <Button text="SwiftUI data bindings: Decrement" tap="{{ updateSwiftData }}" class="btn btn-primary" />
+ </StackLayout>
+</Page>
+app/main-page.ts
:
import {
+ registerSwiftUI,
+ UIDataDriver,
+ SwiftUI,
+ SwiftUIEventData,
+} from "@nativescript/swift-ui";
+import {
+ EventData,
+ Observable,
+ Page
+} from "@nativescript/core";
+
+// A. You can generate types for your own Swift Provider with 'ns typings ios'
+// B. Otherwise you can ignore by declaring the class name you know you provided
+declare const SampleViewProvider: any;
+
+registerSwiftUI("sampleView", (view) =>
+ new UIDataDriver(SampleViewProvider.alloc().init(), view)
+);
+
+interface CountData {
+ count: number;
+}
+
+export function navigatingTo(args: EventData) {
+ const page = <Page>args.object;
+ page.bindingContext = new DemoModel();
+}
+
+export class DemoModel extends Observable {
+ swiftui: SwiftUI;
+ nativeCount = {
+ count: 0,
+ };
+
+ loadedSwiftUI(args) {
+ this.swiftui = args.object;
+ }
+
+ onEvent(evt: SwiftUIEventData<CountData>) {
+ this.set("nativeCount", { count: evt.data.count });
+ }
+
+ updateNativeScriptData() {
+ this.set('nativeCount', { count: this.nativeCount.count - 1 });
+ }
+
+ updateSwiftData() {
+ this.swiftui.updateData({ count: this.nativeCount.count - 1 });
+ }
+}
+Not long ago, the Angular team released a stable version 15 with a few neat features, such as host directives.
+In this article I will try to explain how to leverage the directive composition approach and move from old class-inheritance to a composition approach.
+Host directives are standalone directives that can be added to a component via the @Component decorator, thus avoiding the need to apply the directive through the markup. +Developers can expose their inputs and outputs. +Additionally, they can also map their names to avoid confusion between component and directive properties.
+With a complex component comes complex business logic inside its class. Typescript has mixins support to divide logic between multiple classes and then join it into one giant class.
+Mixins are widely used in @angular/material project. For example, Material Button Component.
+But, as you can see, it itself requires complex structures to actually use it. Not to mention setting input/output properties as an array for the decorator itself. +In short, developers can start struggling with input/output properties and class dependencies if they use lots of mixins.
+Another way is to use long chains of class inheritance.
+In the end, the final component would have a huge constructor (prior to inject function) and supporting this constructor sometimes becomes too painful.
+Another way would be to use services injected into your component, but this creates an additional headache with keeping the config up-to-date (triggering some configuration update for the service when some component’s input property was changed, etc.).
+Directive composition approach works similarly to the Typescript’s mixins: you have multiple classes that contain their own logic, and in the end they all end up used for one final class (component). The difference is that mixins are combined into one class, and for directive composition you need to inject your directive instances into your component class.
+First, I’ll leave a link to an example app built with nx to split the app and its libs.
+Let’s take an example of simple form control component:
+@Component({
+ selector: 'lib-combobox',
+ template: `
+ <input
+ #comboboxInput
+ class="combobox-input"
+ type="text"
+ [attr.list]="'inputOptions' + uniqueId"
+ [attr.placeholder]="placeholder"
+ autocomplete="off"
+ (blur)="onBlur()"
+ [ngModel]="value"
+ (input)="onInput($event)"
+ />
+ <datalist [id]="'inputOptions' + uniqueId" *ngIf="options?.length">
+ <option *ngFor="let option of options" [value]="option">{{ option }}</option>
+ </datalist>
+ `,
+ styleUrls: ['./combobox.component.scss'],
+ changeDetection: ChangeDetectionStrategy.OnPush,
+ providers: [{
+ provide: NG_VALUE_ACCESSOR,
+ useExisting: forwardRef(() => ComboboxComponent),
+ multi: true,
+ }],
+})
+export class ComboboxComponent implements ControlValueAccessor, AfterViewInit {
+ @ViewChild('comboboxInput') comboboxInput: ElementRef<HTMLInputElement>;
+ readonly uniqueId = ++UNIQUE_ID;
+
+ @Input() options: string[];
+
+ disabled = false;
+
+ get value() {
+ return this._value;
+ }
+ set value(v: string) {
+ this.setValue(v, true);
+ }
+ private _value = '';
+ private viewInit = false;
+
+ private onTouched: () => void;
+ private onChange: (value: string) => void;
+
+ constructor(private _renderer: Renderer2) {}
+
+ ngAfterViewInit() {
+ this.viewInit = true;
+ this.setValue(this._value, false);
+ }
+
+ onInput(event: Event) {
+ this.setValue((<HTMLInputElement>event.target).value ?? '', true);
+ }
+
+ onBlur() {
+ this.onTouched();
+ }
+
+ registerOnChange(fn: (value: string) => void) {
+ this.onChange = fn;
+ }
+
+ registerOnTouched(fn: () => void) {
+ this.onTouched = fn;
+ }
+
+ setDisabledState(isDisabled: boolean) {
+ this.disabled = isDisabled;
+ }
+
+ writeValue(value: string) {
+ this.setValue(value, false);
+ }
+
+ setValue(value: string, emitEvent: boolean) {
+ this._value = value;
+ if (this.viewInit) {
+ this._renderer.setProperty(this.comboboxInput.nativeElement, 'value', value);
+ }
+ if (emitEvent && typeof this.onChange === 'function') {
+ this.onChange(value);
+ }
+ }
+}
+This component implements ControlValueAccessor and implements its methods such as writeValue, registerOnChange, registerOnTouched. +And this stuff is commonly repeated across multiple components in your app.
+Previously, to simplify the logic you could extract those methods into a base abstract class. But this class might require its dependencies, which need to be passed via a super call in the constructor. This complicates things.
+Let’s simplify the code, and first, create a standalone directive called CvaDirective.
+@Directive({
+ selector: '[appCva]',
+ standalone: true,
+ providers: [
+ // Small helper service to unsubscribe from streams when component destroys.
+ DestroyedService
+ ],
+})
+export class CvaDirective<T = unknown> implements ControlValueAccessor, OnDestroy, AfterViewInit, DoCheck {
+ /**
+ * NgControl instance.
+ */
+ readonly ngControl = inject(NgControl, {
+ optional: true,
+ });
+
+ readonly cdRef = inject(ChangeDetectorRef, {
+ host: true
+ })
+
+ /**
+ * Form container instance. Usually ngForm or FormGroup directives.
+ */
+ readonly controlContainer = inject(ControlContainer, {
+ optional: true,
+ skipSelf: true,
+ });
+
+ /**
+ * Separate NgForm instance. For cases when formGroup is used with the form itself.
+ */
+ readonly ngForm = inject(NgForm, {
+ optional: true,
+ skipSelf: true,
+ });
+
+ /**
+ * Element reference.
+ */
+ readonly elementRef = inject(ElementRef);
+
+ private readonly _destroy$ = inject(DestroyedService);
+
+ /** Whether the input is disabled */
+ @Input()
+ set disabled(value: boolean) {
+ this.setDisabledState(value);
+ }
+ get disabled(): boolean {
+ if (this.ngControl && this.ngControl.disabled !== null) {
+ return this.ngControl.disabled;
+ }
+ return this._disabled;
+ }
+
+ private _disabled = false;
+
+ /**
+ * Current value of the control.
+ */
+ value: Nullable<T>;
+
+ /** Whether control has errors */
+ get controlInvalid(): boolean {
+ return this._controlInvalid;
+ }
+
+ /**
+ * @hidden
+ */
+ private _controlInvalid = false;
+
+ /**
+ * Emits when the state of the control has been changed.
+ */
+ readonly stateChanges: Subject<string> = new Subject<string>();
+
+ /** @hidden */
+ private readonly _subscriptions = new Subscription();
+
+ /** @hidden */
+ // eslint-disable-next-line @typescript-eslint/no-empty-function
+ onChange: (value: unknown) => void = () => {};
+
+ /** @hidden */
+ // eslint-disable-next-line @typescript-eslint/no-empty-function
+ onTouched = (): void => {};
+
+ /** @hidden */
+ constructor() {
+ if (this.ngControl) {
+ this.ngControl.valueAccessor = this;
+ }
+ }
+
+ /**
+ * Re-validate and emit event to parent container on every CD cycle as there are some errors
+ * that we can't subscribe to.
+ */
+ ngDoCheck(): void {
+ if (this.ngControl) {
+ this.updateErrorState();
+ }
+ }
+
+ /** @hidden */
+ ngAfterViewInit(): void {
+ if (this.ngControl) {
+ this._subscriptions.add(
+ this.ngControl.statusChanges?.subscribe(() => {
+ this._markForCheck();
+ })
+ );
+ }
+ }
+
+ /** @hidden */
+ ngOnDestroy(): void {
+ this._subscriptions.unsubscribe();
+ this.stateChanges.complete();
+ // this.formField?.unregisterFormFieldControl(this);
+ }
+
+ writeValue(value: T): void {
+ this.value = value;
+ this.stateChanges.next('writeValue');
+ this._markForCheck();
+ }
+ registerOnChange(fn: (value: unknown) => void): void {
+ this.onChange = fn;
+ }
+ registerOnTouched(fn: () => void): void {
+ this.onTouched = fn;
+ }
+ setDisabledState(isDisabled: boolean): void {
+ if (isDisabled === this._disabled) {
+ return;
+ }
+ this._disabled = isDisabled;
+ this.stateChanges.next('setDisabledState');
+ this._markForCheck();
+ }
+
+ /**
+ * Need re-validates errors on every CD iteration to make sure we are also
+ * covering non-control errors, errors that happens outside of this control
+ */
+ updateErrorState(): void {
+ const parent = this.ngForm;
+ const parentControlContainer = this.controlContainer;
+ const control = this.ngControl
+ ? (this.ngControl.control as FormControl)
+ : null;
+ const newStatusIsError = !!(
+ control?.invalid &&
+ (control.dirty ||
+ control.touched ||
+ parent?.submitted ||
+ (parentControlContainer as unknown as FormGroupDirective)?.submitted)
+ );
+
+ if (newStatusIsError !== this.controlInvalid) {
+ this._controlInvalid = newStatusIsError;
+ this.stateChanges.next('updateErrorState');
+ this._markForCheck();
+ }
+ }
+
+ /**
+ * Used to change the value of a control.
+ * @param value the value to be applied
+ * @param emitOnChange whether to emit "onChange" event.
+ * Should be "false", if the change is made programmatically (internally) by the control, "true" otherwise
+ */
+ setValue(value: T, emitOnChange = true): void {
+ if (value !== this.value) {
+ this.writeValue(value);
+ if (emitOnChange) {
+ this.onChange(value);
+ }
+ this._markForCheck();
+ }
+ }
+
+ /** @hidden */
+ private _markForCheck(): void {
+ this.cdRef.markForCheck();
+ }
+}
+Let me explain what’s going on inside this directive. +First, we are declaring it as standalone, which means we can apply it to a component via the hostDirectives property of the @Component. +Next, since we need to support template-driven and reactive forms, let's inject the necessary dependencies such as NgControl, NgForm and ControlContainer. We will need these properties later.
+You may see that we also injected ChangeDetectorRef from the host. This is needed to get the component's change detector and call it when the state of the control is changed (valid/invalid).
+Next, we implement all members of ControlValueAccessor interface for further usage in the component. +We also have support for a disabled state of the form control, which may be handy in real-case scenarios. This input property is optional and you can ignore it during input exposing.
+We also have updateErrorState method which automatically checks whether the control is valid and checks whether the user interacted with the control itself or submitted the form.
+That’s all for the directive itself, now let’s update our combobox component to use this directive instead of the direct ControlValueAccessor implementation:
+@Component({
+ selector: 'app-combobox',
+ template: `
+ <input
+ #comboboxInput
+ class="combobox-input"
+ type="text"
+ [attr.list]="'inputOptions' + uniqueId"
+ [attr.placeholder]="placeholder"
+ autocomplete="off"
+ (blur)="onBlur()"
+ [ngModel]="value"
+ (input)="onInput($event)"
+ />
+ <datalist [id]="'inputOptions' + uniqueId" *ngIf="options?.length">
+ <option *ngFor="let option of options" [value]="option">{{ option }}</option>
+ </datalist>
+`
+ styleUrls: ['./combobox.component.scss'],
+ providers: [
+ // Small helper service to unsubscribe from streams when component destroys.
+ DestroyedService,
+ ],
+ hostDirectives: [
+ {
+ directive: CvaDirective,
+ }
+ ],
+})
+export class ComboboxComponent implements OnInit {
+ readonly _cvaDirective = inject<CvaDirective<string>>(CvaDirective);
+ private readonly _destroyed$ = inject(DestroyedService);
+ @ViewChild('comboboxInput')
+ comboboxInput?: ElementRef<HTMLInputElement>;
+ readonly uniqueId = ++UNIQUE_ID;
+
+ /** Notify user of invalid control if necessary. */
+ @HostBinding('class.is-invalid')
+ controlInvalid = false;
+
+ get value(): Nullable<string> {
+ return this._cvaDirective.value;
+ }
+
+ @Input()
+ placeholder: Nullable<string> = null;
+
+ /** Available options for the dropdown. */
+ @Input()
+ options: string[] = [];
+
+ /** Method called when user types into the input field. */
+ onInput(event: Event) {
+ this._cvaDirective.setValue(
+ (<HTMLInputElement>event.target).value ?? '',
+ true
+ );
+ }
+
+ /** Method called when user focuses-out the input field. */
+ onBlur() {
+ this._cvaDirective.onTouched();
+ }
+}
+So, we’ve moved all form-related stuff into the directive. This gives us a more clear and readable component.
+You see that we’ve injected CvaDirective into the component to call its members such as setValue and use the initial value of the form control to set it for the input field.
+With the example above I’ve shown how to simplify your component and move all background logic into a separate class without the need of inheritance.
+Now, let’s say we want it to accept not only string[], but also Observable<string[]>, or even custom data source class which retrieves the data from some backend.
+And again, host directives to the rescue!
+Before we start with the directive itself, let’s define what our directive should support:
+Automatically subscribe/unsubscribe from the dataSource;
+When datasource’s data changes, or new dataSource instance being passed, notify parent component of the changes in data;
+In this example, we will create a simple data source class which will convert passed data into an observable and simply return it.
+First, let’s generate abstract data source provider class which our components would implement in own way:
+import { Nullable } from '@host-directives-app/shared';
+import { Observable } from 'rxjs';
+
+/**
+ * Acceptable data source types.
+ */
+export type DataSource<T = unknown> =
+ | AbstractDataSourceProvider<T>
+ | Observable<T[]>
+ | T[];
+
+export interface DataSourceParser<
+ T = unknown,
+ P extends AbstractDataSourceProvider<T> = AbstractDataSourceProvider<T>
+> {
+ /**
+ * Defines which data provider class to initiate.
+ * @param source data source to be parsed.
+ */
+ parse(source: Nullable<DataSource<T>>): Nullable<P>;
+}
+
+export function isDataSource<T = unknown>(
+ value: any
+): value is AbstractDataSourceProvider<T> {
+ return (
+ value &&
+ 'unsubscribe' &&
+ typeof value.unsubscribe === 'function' &&
+ value.dataChanges
+ );
+}
+
+export abstract class AbstractDataSourceProvider<T = unknown> {
+ abstract fetch(): Observable<T[]>;
+}
+So, here we’re declaring that our DataSource can accept three types of data: Class instance, Observable of an array and a plain array.
+With AbstractDataSourceProvider we can now actually create our directive called DataSourceDirective:
+export const DATA_SOURCE_TRANSFORMER = new InjectionToken<DataSourceParser>('DataSourceTransformerClass');
+
+@Directive({
+ selector: '[appDataSource]',
+ standalone: true,
+ providers: [
+ // Small helper service to unsubscribe from streams when component destroys.
+ DestroyedService
+ ],
+})
+export class DataSourceDirective<
+ T = unknown,
+ P extends AbstractDataSourceProvider<T> = AbstractDataSourceProvider<T>
+> implements OnDestroy {
+ @Input()
+ set dataSource(data: Nullable<DataSource<T>>) {
+ this._dataSource = data;
+
+ this.dataSourceChanged.next();
+
+ this._initializeDataSource();
+ }
+
+ get dataSource() {
+ return this._dataSource;
+ }
+
+ /** @hidden */
+ dataSourceProvider: Nullable<P>;
+
+ /** @hidden */
+ private _dsSubscription = new Subscription();
+
+ /**
+ * Data stream. Emits when new data retrieved.
+ */
+ readonly dataChanged$ = new BehaviorSubject<T[]>([]);
+
+ /**
+ * Emits when the data source object has been changed.
+ */
+ @Output()
+ readonly dataSourceChanged = new EventEmitter<void>();
+
+ /**
+ * Event emitted when datasource content has been changed.
+ */
+ @Output()
+ readonly dataChanged = new EventEmitter<T[]>();
+
+ /**
+ * Event emitted when data provider loading state has been changed.
+ */
+ @Output()
+ readonly isLoading = new EventEmitter<boolean>();
+
+ private _dataSource: Nullable<DataSource<T>>;
+
+ private readonly _destroyed$ = inject(DestroyedService);
+
+ private readonly _dataSourceTransformer = inject<DataSourceParser<T, P>>(DATA_SOURCE_TRANSFORMER);
+
+ /** @hidden */
+ private _initializeDataSource(): void {
+ if (isDataSource(this.dataSource)) {
+ this.dataSourceProvider?.unsubscribe();
+
+ this._dsSubscription?.unsubscribe();
+ }
+ // Convert whatever comes in as DataSource, so we can work with it identically
+ this.dataSourceProvider = this._toDataStream(this.dataSource);
+
+ if (!this.dataSourceProvider) {
+ return;
+ }
+
+ this._dsSubscription = new Subscription();
+
+ this._dsSubscription.add(
+ this.dataSourceProvider.dataLoading
+ .pipe(takeUntil(this._destroyed$))
+ .subscribe((isLoading) => this.isLoading.emit(isLoading))
+ );
+
+ this._dsSubscription.add(
+ this.dataSourceProvider.dataChanges
+ .pipe(takeUntil(this._destroyed$))
+ .subscribe((data) => {
+ this.dataChanged.emit(data);
+ this.dataChanged$.next(data);
+ })
+ );
+ }
+
+ /** @hidden */
+ ngOnDestroy(): void {
+ this.dataSourceProvider?.unsubscribe();
+ this._dsSubscription?.unsubscribe();
+ }
+
+ /** @Hidden */
+ private _toDataStream(source: Nullable<DataSource<T>>): Nullable<P> {
+ return this._dataSourceTransformer
+ ? this._dataSourceTransformer.parse(source)
+ : undefined;
+ }
+}
+Quick explanation of what’s going on here: +We have T and P generic types which are responsible for providing awareness of the data types we are working with in our components, so IDE also knows it, and provides suggestions. +Next, we have a dataSource input property which accepts our DataSource type.
+When the data is set, we call _initializeDataSource method which does couple of things: +First, it closes the stream of the previous data source. +Then, it transforms our data into acceptable data source provider with the help of our DataSourceParser which is injected with a DATA_SOURCE_TRANSFORMER injection token. +Lastly, it subscribes to the events of the data source provider and passes them to the component it applied to.
+That’s all for the directive itself and its dependencies.
+Now, let’s go back to our combobox component, and update it in order to accept multiple types of data.
+First, we need to implement our AbstractDataSourceProvider class:
+import {
+ AbstractDataSourceProvider,
+ DataSource,
+ DataSourceParser,
+ isDataSource,
+} from '@host-directives-app/data-source';
+import { Nullable } from '@host-directives-app/shared';
+import { isObservable, Observable, of } from 'rxjs';
+
+export interface ComboboxItem {
+ label: string;
+ value: string;
+}
+
+export type AcceptableComboboxItem = ComboboxItem | string;
+
+export class ComboboxDataSource extends AbstractDataSourceProvider<AcceptableComboboxItem> {
+ fetch(): Observable<AcceptableComboboxItem[]> {
+ return isObservable(this.items) ? this.items : of(this.items);
+ }
+ constructor(
+ public items:
+ | Observable<AcceptableComboboxItem[]>
+ | AcceptableComboboxItem[]
+ ) {
+ super();
+ }
+}
+
+export class ComboboxDataTransformer
+ implements DataSourceParser<AcceptableComboboxItem>
+{
+ parse(source: Nullable<DataSource<AcceptableComboboxItem>>) {
+ // If source is an instance of a data source class, return it without modifications.
+ if (isDataSource(source)) {
+ return source as ComboboxDataSource;
+ } else if (Array.isArray(source) || isObservable(source)) {
+ // If the source is an array or observable, return new instance of the datasource with items inside.
+ return new ComboboxDataSource(source);
+ }
+
+ return null;
+ }
+}
+As you can see, we defined an additional interface for the combobox item in case we want to render the label different than its value. +And for the data source provider, we are just checking whether the data is observable or a plain array. If it’s an array, we wrap it into Observable and return it.
+Additionally, we are implementing DataSourceParser for combobox to be able to apply the necessary data source class for the data passed to it.
+Now, let’s update our component to work with the data source directive:
+@Component({
+ selector: 'app-combobox',
+ template: `
+<input
+ #comboboxInput
+ class="combobox-input"
+ type="text"
+ [attr.list]="'inputOptions' + uniqueId"
+ [attr.placeholder]="placeholder"
+ autocomplete="off"
+ (blur)="onBlur()"
+ [ngModel]="value"
+ (input)="onInput($event)"
+/>
+<datalist [id]="'inputOptions' + uniqueId" *ngIf="options?.length">
+ <option *ngFor="let option of options" [value]="option.value">{{ option.label }}</option>
+</datalist>
+`
+ styleUrls: ['./combobox.component.scss'],
+ providers: [
+ // Small helper service to unsubscribe from streams when component destroys.
+ DestroyedService,
+ {
+ provide: DATA_SOURCE_TRANSFORMER,
+ useClass: ComboboxDataTransformer,
+ },
+ ],
+ hostDirectives: [
+ {
+ directive: CvaDirective,
+ },
+ {
+ directive: DataSourceDirective,
+ // Expose dataSource input property directly on combobox component
+ // eslint-disable-next-line @angular-eslint/no-inputs-metadata-property, @angular-eslint/no-input-rename
+ inputs: ['dataSource:options'],
+ // Expose data source's directive outputs directly from combobox component.
+ // eslint-disable-next-line @angular-eslint/no-outputs-metadata-property
+ outputs: ['dataSourceChanged', 'dataChanged', 'isLoading'],
+ },
+ ],
+})
+export class ComboboxComponent implements OnInit {
+ readonly _cvaDirective =
+ inject<CvaDirective<AcceptableComboboxItem>>(CvaDirective);
+ readonly _dataSourceDirective =
+ inject<DataSourceDirective<AcceptableComboboxItem, ComboboxDataSource>>(
+ DataSourceDirective
+ );
+ private readonly _destroyed$ = inject(DestroyedService);
+ @ViewChild('comboboxInput')
+ comboboxInput?: ElementRef<HTMLInputElement>;
+ readonly uniqueId = ++UNIQUE_ID;
+
+ @HostBinding('class.is-invalid')
+ controlInvalid = false;
+
+ get value(): Nullable<AcceptableComboboxItem> {
+ return this._cvaDirective.value;
+ }
+
+ @Input()
+ placeholder: Nullable<string> = null;
+
+ // Previously it was @Input. Now it's just a property of a class.
+ options: ComboboxItem[] = [];
+
+ ngOnInit(): void {
+ // Subscribe to the data source's directive data stream and update `options` array
+ this._dataSourceDirective.dataChanged$
+ .pipe(takeUntil(this._destroyed$))
+ .subscribe((data) => {
+ this.options = this._formatOptions(data);
+ console.log(data);
+ });
+ this._cvaDirective.stateChanges
+ .pipe(
+ filter((stateType) => stateType === 'updateErrorState'),
+ takeUntil(this._destroyed$)
+ )
+ .subscribe(() => {
+ this.controlInvalid = this._cvaDirective.controlInvalid;
+ });
+ }
+
+ /** Method called when user types into the input field. */
+ onInput(event: Event) {
+ this._cvaDirective.setValue(
+ (<HTMLInputElement>event.target).value ?? '',
+ true
+ );
+ }
+
+ /** Method called when user focuses-out the input field. */
+ onBlur() {
+ this._cvaDirective.onTouched();
+ }
+
+ /** Transform from plain string into value/label object for more human-readability of the options */
+ private _formatOptions(data: AcceptableComboboxItem[]): ComboboxItem[] {
+ return data.map((option) =>
+ typeof option === 'string' ? { value: option, label: option } : option
+ );
+ }
+}
+So, what’s changed?
+First, we added our DataSourceDirective to hostDirectives and exposed its dataSource input property as options input property which we previously had directly in the component.
+Next, instead of relying directly on options input property, we’re subscribing to DataSourceDirective’s dataChanged$ BehaviourSubject and waiting for new data to come. +When the data is emitted, we update the inner options property with the data received from the DataSourceProvider.
+And that’s pretty much it!
+In conclusion: Even though Host Directives are at an early stage of development and have some teething issues, such as the explicit definition of the host directive, they already provide the huge benefit of simplifying the codebase of your existing components and libraries by splitting the logic between multiple independent classes and reducing the amount of inheritance chains.
+As I was mentioning at the beginning of the article, here’s a complete example application used in this article.
+Issue: "A user has a "Free Subscription" price plan with some limited resource usage."
Currently, it looks like this:
+When a user builds his application on his own (an application is connected to Medusa via tokens and medusa-plugin), medusa-plugin makes an API call to Medusa Server with a request to update application builds inside Medusa Dashboard, it creates some records in the database and you can manage and use those builds in the Dashboard then. +At the current moment, we don’t have any restrictions for build amount and usage according to the Price Plan so users can easily go over the quota limit, e.g. using 130 builds with only 100 builds in the Plan.
+The task sounds like "Restrict overlimit usage of Medusa Resources according to Quota". Sounds like a five-minute task, just insert a checker before business logic. If the check is positive – go further, if negative – stop action. +Okay, it’s done. Even works. But there’s another issue – it will look for a User like a bug. No… like a BUG. As all things, described above, are under-the-hood processes, the user cannot know at that moment that he got overlimit, so we have to notify the user somehow in some way.
+We don’t have any real-time solutions implemented inside yet, so it seems that the task, easy at first sight, becomes a task of implementing real-time technology. Okay, what do we know about it? Web Sockets – cool, but they are bidirectional, which is more than we need here. In addition, we might have an issue if the load balancer doesn’t support WebSockets (like a basic AWS Load Balancer).
+SSE – Server-Sent-Events. Technology, which might cover a lot of cases where people use WebSockets. In addition, it brings less headache with integration as it uses HTTP protocol only. +Some words of difference between WebSockets and SSE.
+Okay, we said it, we did it! +On the Back End side, we use NestJS, Front End is NextJS (for a non-native speaker, it’s quite difficult and funny to try to pronounce them instantly one by one :) ).
+Let’s start. NestJS documentation has some words about the implementation of SSE. So it looks easy to do, it has simple examples, and it works… But in our case, not always and not everywhere.
+There are a few issues I have faced during implementation.
+An example in NestJS documentation is related to interval usage and sends messages once a second. We do not need that way, we need to send messages at the moment we want, and only to users whom this message concerns. +So we had to alter the event emitter to send messages only at that moment we need. +It was done.
+@Injectable()
+export class ServerEventsService {
+private readonly emitter: EventEmitter;
+
+constructor() {
+this.emitter = new EventEmitter();
+}
+
+subscribe(userId: string): Observable {
+return fromEvent(this.emitter, userId);
+}
+
+emit(userId: string, data?: SseEventMessage): void {
+this.emitter.emit(userId, { data });
+}
+}
+As we are using NextJS, it means that most pages are SSR (Server Side Rendered). That means, in addition, that we cannot create the EventSource instance anywhere we want. So we have to initialize the eventsource after the component has been rendered in the browser.
+Firstly, we need to receive messages and show them globally, it should not be related to a certain page. Another thing is how to make it work in our case of SSR.
+We just need to do that in useEffect, to make it start working in a Browser already. So we created a Global Modal with some inputs, integrated our solution and it worked. Cool, we are so happy!
+Our solution worked well in the case of real-time processes, but what if Medusa Dashboard was closed? We need to notify a user that he has some issues with using Medusa because of quota overlimit. Easy, just make a check. The thing is that we need to make an async call in useEffect and linter tells us that we are incorrect with that decision… But, there’s a workaround. We need to wrap the async function into the self-invoking unnamed function.
+(async () => {
+await task();
+})();
+It’s a bad thing to have a checker on the Back End side for the under-the-hood process and the same checker on the Front End side, so on a Front End we just make an API call without using a response. On the server side, we check all related data, and in case the checker responds negatively, we emit a new message from Server to UI. +But it didn’t work.
+Under-the-hood things emitted the message from one controller, check by demand the emitted message from another controller.
+What is the issue? Why doesn’t it work? I’m emitting a message, and it emits but the UI doesn’t get it…
+- I checked if my emitters have listeners. One of them had one, and another one – did not.
- Whaaat?
I have created an SSE Service and injected that as a provider into the App Module, then into another Module. It’s not directly said anywhere in the documentation that by doing things in this way, we will get multiple different instances of Services. Hmmm… Okay. +Creating a separate module with a separate service, then injecting modules into other modules – saved me… The NestJS modules are singletons, so now I have only one instance of the SSE part and really can use this service everywhere in the Server and emit messages to a certain channel.
+That’s it. We have integrated SSE into the System, it works great and this is a base to develop that further.
+Now we can notify a user if something is happening under the hood and the user should be aware of it. Great!
+Hey everyone, Medusas speaking! +Back in September, we shared about our re-launch of the only commercially available tool for Module Federation - Medusa. The community expressed deep interest, mostly, in the form of feature requests, which is great! We’re excited to bring you updates on those requests towards Medusa. So, let’s see what we have to share after the last few months’ hot (burning) sprints. We’ve broken the “new arrivals” into two segments to help you digest them simpler. Here you go! See the updates for the two main categories:
+We previously encountered an issue, using remote IDs that included the @ syntax. This issue has now been resolved, and you can use remote IDs as expected, possibly even better than you expected :-)
+We’ve received reports that the Windows users' builds had been failing due to a hardcoded backtick in the codebase. We’ve resolved this issue, and the code has become platform-agnostic, feel free to use it on any operating system.
+Users who wanted to use a different version of Webpack from the one provided by the federation dashboard plugin, previously had to use a force command to install due to unmet peer dependencies. The dependency requirement for Webpack has now been updated to ^5.40.0, allowing users to install the desired version, within reason, without using a force command.
+Added a default version picker that automatically selects the latest available version.
+Updated the Sentry SDK to the latest version to improve error tracking and reporting.
+Made various enhancements to the user interface, and fixed reported issues to improve user experience.
+Upgraded from Material Design Version 4 to Version 5 to use the latest design guidelines and features.
+Added subscription plans to allow users to choose the features and pricing that suit their needs best.
+Added a new onboarding page to improve the users’ first experience with the platform.
+Started the development towards the integration with Cloudflare to deliver enhanced security and performance.
+Custom Environments and Organizations are enabled to provide additional functionality and flexibility.
+We’ve updated our platform to enable usage of React 18 and Next 13, along with the latest version of MUI. Experience a higher development velocity and improve the overall performance!
+Providing previously impossible visibility and control over federated applications, Medusa is a critical tool for any team working with Federated Micro Frontends.
+Seattle, Wa., Aug 22, 2022, Zackary Jackson, co-creator of Webpack Module Federation, co-author of Practical Module Federation, and Webpack Core Team member and Principal Engineer at Lululemon, together with Valor Software as exclusive partners for Module Federation support have launched their first collaborative SaaS offering, Medusa.
+Medusa creates an environment where software developers finally have visibility into their module federation-powered and distributed micro frontends. Additionally, Medusa will provide, for the first time, the ability to control federated applications, at runtime, with unparalleled ease.
+“We believe that software engineering teams should be able to work with visibility, autonomy, confidence, and independence without sacrificing the stability and quality of their product. We believe software releases should be safe, fast, and immutable” – Zack Jackson
+In an increasingly distributed world, software development and business teams are looking to be able to ship more value to their customers, more often and with more reliability. Medusa provides those teams the ability to deploy in both an evergreen mode, while also having the ability to set specific versions of remote modules for reliability. This flexibility allows teams to move fast and be able to nearly instantly upgrade or roll back their platforms without having to redeploy.
+Understanding what team is pulling in a dependency in a distributed environment or which version of a remote module a team is pulling in also becomes problematic as applications grow. With the dependency graphs inside of Medusa, teams are easily able to find potential problems and make adjustments for version compatibility.
+Founded in 2013 by Dmitriy Shekhovtsov (Dima) as a MEAN stack software development firm, Valor has evolved into a globally renowned software development and consulting firm with over 130 individuals working on Full Stack development with a primary focus on all things TypeScript from 5 continents.
+With experience in startups, Fortune 25 organizations, and Global NGOs, and Dima who together with Manfred Steyer brought Module Federation support into the Angular CLI leading to its popularity in the Angular community, Valor is uniquely positioned to create the Medusa SaaS platform from both a technology perspective but also a product positioning perspective.
+Medusa team +info@medusa.codes
+If you reached this article, you are likely planning to either start using Nx monorepos, or to migrate an existing project into an existing monorepo.
+If you’re unfamiliar with Nx monorepos, make sure to check the many advantages it can bring you, including better code reusability, dependency maintenance, and developer experience.
+Now, if you decided that you’re going to migrate your application into an existing monorepo, you might be wondering 1) how to do it the best possible way and 2) if it is possible and worth the effort to maintain your history at all.
+The short answer is simply git. Git will allow you to pull your changes automatically instead of requiring you to manually copy and paste all your files within a folder. Also, your history will be almost entirely preserved with minimal effort, which is an amazing benefit.
+Another benefit of using git is continuity. We’ll get into more detail later, but after you create the first pull request for the migration, your teammates will review it, you may go into a deep QA process and many other things can happen between the PR creation and the actual merge and release.
+What if during this process a teammate needs to push a fix or an important feature on the original repo? If you don’t use git, you’ll probably be left with the single alternative of finding and replicating every change manually and potentially creating another entire process of review.
+However, if you use git for the first step, you’ll see that you can later continuously pull the changes from one repo to another with the traditional git merge command. That not just speeds you up but is also much more reliable in terms of getting the new code correctly.
+Migrating an entire app to another repo can be a pretty impactful change. That means that your first deployment from the monorepo will naturally be very hesitant.
+Before starting the actual migration, there are a few steps I recommend in order to have a smoother transition. The first deployment from the monorepo will likely be a very cautious one. After all, big changes like this always come with a bit of hesitance.
+To minimize the changes that will happen during the actual migration, we can anticipate some steps. You want to make sure that you break big changes into several smaller steps that will allow you to focus on making them completely ready. Also this way you solve potential issues gradually instead of dropping a massive change that can leave you with a pack of problems to solve.
+One of the most time-consuming tasks from the migration will potentially be matching dependencies. That is especially true if you have major version differences in significant packages, like your framework.
+To do that, you can use a JSON comparison to see the differences between the two package.json files, specifically in the dependencies and devDependencies fields. Simply paste one file at each side and check the mismatched versions by taking semver’s resolution into account. For this step, you will not add any dependencies to either repo, as the goal here is to only make sure that both applications run correctly with the same dependencies.
+After you make sure that all the dependencies are matching, do a deployment for the affected repos and keep an eye out in the next few days for new bugs that might be related to the upgrades while you work on the next steps.
+WARNING: While it is possible to manage dependencies with multiple versions within a repo, Nx strongly discourages it, so consider making an effort to match dependencies before attempting to keep different versions.
+If you have lint checks running on CI/CD, you’ll eventually need to adapt the to-be-migrated repo to the new code style in order to be able to push the new code on the monorepo. +While it’s true that you can easily have a custom lint configuration for different projects within a monorepo, you likely want to follow a similar code style to guarantee consistency between your team. So while this is not mandatory, it is a good thing to consider. This step should take into consideration linting, formatting, and other project-specific settings like TypeScript strict mode.
+Once more, it’s recommended to do this before you start with the repo migration to ensure that the migration is restricted to its purpose and does not include any other side effects.
+This is also helpful for not running into conflicts when you continuously pull changes to the monorepo as you won’t have to deal with code style and formatting differences.
+When you finally approach the moment of deployment, it is natural that your team will keep an eye out for any issues. This, however, may lead to teammates reporting existing issues as if they were related to the migration, which can lead to some tension by making others think that some bugs were introduced.
+Also, it’s common that every other team member has a few issues and problems tracked only in their mind that didn’t make it to a real ticket that then gets known to the entire team.
+To mitigate this, I encourage you to talk to your teammates and document all the small issues they can think of. This way, colleagues can be easily consulted, and issues solved, before teammates raise their hands to report a bug supposedly caused by the migration.
+Once all the previous steps are done, it is time to work on the actual migration. This step is surprisingly easy since we’re using git to do the work for us.
+We’ll start by adding an Nx project that will later hold the migrated code. Assuming you’ll be migrating an Angular app named single-app-repo, simply run:
+nx generate @nx/angular:application single-app-repo
+NOTE: always check in the docs for the generator to see the options available
+You’ll get a new apps/single-app-repo folder that contains a few configuration files at its root and a src directory, which will hold the implementation and contains an example app.
+With the project setup, we can start bringing in the original repo code. Assuming the repo that will be migrated is named inside the org your-company, run the following command:
+git remote add single-app-repo https://github.com/your-company/single-app-repo.git
+This will add a new origin that points to the existing repo, which you’ll use to perform the initial merge and to further update if that repo happens to have other activity prior to the first merge.
+After this, we’ll merge the code from the original repo. Assuming the latest changes you want to pull are on the main branch, run:
+git merge single-app-repo/main --allow-unrelated-histories
+This will bring all your files to the root of the application and naturally result in some conflicts.
+You probably are facing conflicts in your package.json and package-lock.json/yarn.lock files. For these files specifically, you won’t be able to retain history as they will live on the same path on both repositories, so you can simply accept the current changes and discard the original repo’s changes.
+For any other conflicting files, it’s worth looking at each and handling them separately as you consider whether they should stay at the root of the monorepo or should be sent to the project’s directory. In either case, it’s recommended that you accept the current changes, but if the second one is the answer, you should create the file manually inside the dir after you solve the conflict.
+Now, lastly, you can take all the added files and move them to apps/single-app-repo. It’s important to adapt your structure to the Nx one, which means keeping the previously on root files inside the apps/single-app-repo and the actual source files inside apps/single-app-repo/src. If you already used Angular’s default structure, your work will probably be done by copying over the files to the indicated folder. Also, if you are creating an Angular app, it’s also required to adapt your angular.json file into the project.json file previously created by the NX generator.
+After everything is done and moved, do a last check to see if the history is correctly maintained. If you’re on VSCode, you can use GitLens and Git Graph to have a better visualization of it.
+First, stage all your files. Then, go to a file from the new project that you are confident that should have kept history and see if the blame is being correctly displayed. Lastly, open Git Graph and try to find commits from the original repo to ensure they are being carried over too.
+If everything is well, you’re ready to finally merge with git merge --continue and start testing your brand new project.
+You have probably overridden most of the files generated by Nx initially with your own implementation. Still, it’s possible that some files were left over. They won’t affect your application, but it’s good for housekeeping that you take a last look and remove any leftovers. Look for the nx-welcome file, a favicon that you won’t use, and even loose and unnecessary .gitkeep files.
+Now that you finished migrating the old repo, you can safely remove it from the remote with git remote remove single-app-repo.
+Lastly, it might be a good idea to also tag your repository as deprecated and archive it to ensure no one keeps working on it.
+After you perform the merge, you’ll need to start testing to see if everything is still working as expected. If your application is building and serving plus you performed the steps to avoid side effects, it’s safe to assume that your code will behave as expected since at the end of the day it’s the same code.
+However, there are a few spots where it’s worth taking a careful look since they can get more easily affected by the repository change, so make sure you double-check:
+Environment settings
+All sort of assets, including images, icons and etc
+Localization files
+Path references for other directories, including node_modules
+If all of those are fine, as well as your other tests, congratulations, you just migrated your entire repo to NX and can enjoy all of its benefits.
+If git isn’t correctly detecting a file’s history when you are continuously merging, abort your merge with git merge --abort and increase the renameLimit to make sure that git is more flexible for considering a file as renamed rather than moved. This is specially used if you weren’t able to keep the same formatting and linting and are now left with modified files due to these differences.
+Many organizations are talking about how they are launching a modernization effort, but it seems that some organizations are struggling to move to React 18. It can be a challenge to get product teams to prioritize the upgrade from 16 or 17.
+There is a modernization strategy that can help make the transition without causing disruptions.
+There are several reasons why an organization should upgrade to React 18, beyond the basic fact that React 15 and 16 are no longer receiving security patches from the React team.
+Performance, with each new version of React, performance increases. Users are demanding more snappiness, additionally SEO is becoming even more top of mind for many organizations. Performance gains and the revenue they can potentially generate are a huge driving factor.
+Developer productivity, moving outside of the current major version leads to a large decrease in availability of maintained third party packages. Most large OSS libraries support n-1 (current major version minus one), so the moment a team gets outside of that range they risk being deprecated and having to support the package on their own.
+New Features, for organizations using SSR in particular the server side suspense that comes with React 18 is a real boost for performance and reduction in code complexity.
+Most of the organizations that we work with are rather large enterprises. Their development efforts typically consist of multiple teams, multiple backlogs, and multiple, sometimes competing, priorities. Trying to land complicated upgrades takes a rather large amount of effort just in coordination. This has an impact on overall delivery velocity as well since it distracts from feature work.
+We need to work in a way that is:
+Easily repeatable
Automated
Scalable
Non-disruptive
Incremental
+While the main stack being mentioned in this article is React, it is important to mention that Module Federation as a strategy can be used with other frontend stacks — Angular, Next.js, Vue — as well as on mobile with React Native and NativeScript, and on backends with Node or Node with NestJS.
+When starting a modernization effort that involves frontend development, one of the table stakes for Valor Software in its modernization efforts is the use of Nx.
+Too often when working with an organization there are undocumented requirements, configurations spread throughout the organization, or just high levels of repetitive boilerplate. One of the first things we do in an organization is create a series of generators based on their current architecture best practices.
+ESLint configs, Docker Compose configs, SonarQube — whatever they may be using, we take it and wrap it into a nice enterprise workspace generator tailored to their specific needs.
+Having organizational consistency, particularly at the onset of a modernization effort helps ease the adoption curve later on.
+Changing methodologies is inherently challenging; while many developers will be able to adopt quickly, others benefit greatly from having guardrails. Codifying the existing architecture best practices allows organizations to understand how those practices are evolving over time.
+Often organizations have their own, boutique, combinations of technologies they have built their application architecture ecosystem around. An organization using React 16 with Loadable Components, Apollo Client all requiring SSR and backed by Enzyme tests and built with Make is going to have some adventures moving to React 18 for a multitude of reasons.
+Creating incremental demos allows the team to start with the working reference architecture. These demos start by following the current organizational standards, with all of their niche complexities. Once the Nx workspace is able to generate that reference architecture properly the organization now has its starting point for each of the next stages of platform evolution.
+Nx shines again in the aspect that after the reference architecture can be generated, so can the migrations. For the most part, the changes needed to add Module Federation to a React application are a known quantity. Which configurations to add, where to add them, and which things to test afterwards is also known. With the reference architecture created, Nx migrations can be written to perform these known operations. For those unfamiliar, Nx migrations work a bit like code mods on steroids.
+Each migration becomes increasingly more challenging but also reveals more value to the platform. Typically Valor sees the following steps.
+Move to Webpack 5
+Add Module Federation
+Create an application shell with target technology stack and methodologies that wraps the existing React 16 application and still loads them with an adaptor
+Migrate federated remotes to React 17 (this may be skipped sometimes)
+Migrate federated remotes to React 18
+Refactor SSR implementations to begin to use Suspense
+Most times there is also a migration to Typescript during one of these stages. This helps quite a bit with testability and reusability overall while also boosting developer productivity.
+Adding an Application Shell around the existing application, then added module federation gives teams greater freedom. With this combination they are now able to incrementally upgrade each of the federated remotes with higher relative autonomy. This allows the team to decide how to prioritize migrating the rest of the application. With clear communication patterns and backwards compatibility in place the efforts required to coordinate are significantly lower.
+Architecture standards change, design systems get updated, and, as with all software, breaking changes are always potentially around the corner. By leveraging an enterprise Nx workspace, those changes can be propagated throughout the enterprise with a single command, which can also be automated.
+With the new Nx workspaces and usage of caching builds also become dramatically faster (typically on an order of 10-100x). We saw one customer’s builds go from zero cache-ability and taking 90+ minutes to run, to high cache-ability around 300-600 milliseconds.
+Valor Software has now worked with multiple organizations to begin this migration process, and the results are, to put it bluntly, astonishing. As partners with the team behind Nx, Nrwl, and the exclusive partners for Module Federation, we would love the opportunity to share specific details on how we can help your organization move to React 18 and Module Federation — schedule a call today!
+Module Federation is a technique that allows developers to split a large web application into smaller, independent modules that can be loaded on demand. The concept of Module Federation was first introduced by Zack Jackson. He later wrote a book on Practical Module Federation written together with Jack Herrington.
+Module Federation works by using a "host" application that is responsible for loading and managing the various modules. The host application loads the modules asynchronously and communicates with them using a shared interface. This allows the modules to interact with each other and share data and functionality.
+Module Federation is a relatively new technology that is gaining popularity among developers as it allows for more modular, performant, and scalable web applications. It is not specific to any frontend framework and can be used with popular frameworks such as Next.js, React, Angular, and Vue. In addition, module Federation can also be used with Node.js itself for Federation of server code.
+In this blog post, we will be discussing the business value of Module Federation. Specifically, we will look at how modularity and scalability can benefit businesses by reducing development time and costs, improving maintainability and flexibility.
+Module Federation allows for more modular and scalable web applications by enabling developers to break down a large codebase into smaller, reusable pieces that can be loaded and updated separately. This flexibility and ability to ship code allows developers to work on individual parts of the application independently, which can reduce development time and costs. It also reduces risk due to the fact that smaller segments of functionality can be tested as functionality is being deployed.
+In addition, the modularity of Module Federation can also improve the maintainability and flexibility of the application. Because each module is self-contained and has its own dependencies, it is easier to update and maintain individual modules without affecting the rest of the application.
+Module Federation can also increase developer autonomy by allowing developers to work on their own modules without having to coordinate with the rest of the team as they did when working with monolithic applications. This can lead to faster development and a more efficient workflow.
+The resilience of the application is also increased by allowing portions of the web application to be unavailable without impacting the entire application. This can be particularly useful for applications that need to be highly available, as it allows for better handling of outages and maintenance.
+Finally, Module Federation often reduces the bundle size of the application by reducing the amount of code that needs to be shipped to the browser on the first interaction. This can improve the performance of the application and can also be beneficial for search engine optimization (SEO) as it reduces the amount of code that needs to be parsed by the browser.
+There is public information available that many large and recognizable organizations are using Module Federation in their web applications. Some examples of well-known companies that have adopted Module Federation include PayPal, Best Buy, Lululemon, Semrush, Cloudflare, Epic Games, Business Insider, Box.com, Shopify, Adidas, Fidelity, Bytedance, and Chase.
+These companies have likely implemented Module Federation to achieve greater modularity and scalability in their web applications, which we can expect to contribute to the success of their web presence.
+It is worth noting that these are just a few examples of the many organizations that are using Module Federation. There are likely many other companies, both large and small, that are using Module Federation internally or externally in their web applications. This demonstrates the widespread adoption of Module Federation as a powerful tool for building modular and scalable web applications.
+If a development organization is looking for a technique that allows developers to split a large, monolithic, web application into smaller, independent modules that can be loaded on demand, Module Federation is the way to go. The reduced development time and costs, improved maintainability and flexibility, increased developer autonomy, increased application resilience, and reduced bundle size are just the beginning of the benefits.
+As we’ve shared, Module Federation has already been adopted by many well-known organizations, and it is likely that more companies will adopt it in the future.
+It is clear that business should try to stay up-to-date with new technologies and begin to leverage Module Federation, we know it will help them to stay competitive.
+Valor Software is a software development and consulting company that specializes in helping businesses modernize their web platforms and leverage new technologies like Module Federation. As official partners for the Module Federation organization, Valor Software has extensive experience and expertise in implementing module federation for businesses of all sizes.
+By working with Valor Software, businesses can take advantage of the latest technologies and techniques to build modern web applications that are more adaptable to changing needs and demands while also ensuring best practices through unparalleled access to the creator and supporting maintainers of Module Federation itself.
+Reach out today if you have any questions sales@valor-software.com +Zackary Chapple
+Module federation has been one of the most popular topics in development lately. People love the way it allows teams to develop applications independently and integrate them all into a single final application. While that seems good for the web, how could Module Federation look in a mobile native application?
+Let’s get the elephant out of the room first. The whole point of module federation is that teams can deploy their applications independently, but native apps have their bundles and code shipped holistically with the app. Even if they didn’t, having the user wait or be unable to load your app in bad or no connectivity would lead to terrible UX. Before going down this path, you need careful thought and a really good reason.
+So let’s start with a use case. One of our large enterprise clients has a WYSIWYG editor for NativeScript, complete with their own native components library. They have their own SSO and app “shell” that is common to all of their apps, but their users are able to customize the content, including pushing changes only to specific screens. To generate this they needed to be able to generate bundles dynamically and push them to the application so they could easily switch between apps, and update only the user’s bundle.
+This application highlights one of the beauties of NativeScript. The users don’t need to have knowledge of native code at all, and if they need to extend something, they can do it directly in JavaScript or TypeScript, while also allowing them to add native code once they feel like they need it.
+Now back to the application. This was initially built before bundlers were widely used, and once bundlers became the norm, it became a tricky situation where they’d need to map the available modules and override the require functions to provide the user code with the expected module. A mess. Enter Webpack Module Federation.
+import { Component, NgModule, NO_ERRORS_SCHEMA } from "@angular/core";
+import { RouterModule } from "@angular/router";
+import { NativeScriptCommonModule } from "@nativescript/angular";
+import { timer } from "rxjs";
+
+@Component({
+ template: `<Label>Hello from wmf! Here's a counter: {{ timer | async }}</Label>`,
+})
+export class MyComponent {
+ timer = timer(0, 1000);
+}
+
+@NgModule({
+ declarations: [MyComponent],
+ imports: [NativeScriptCommonModule, RouterModule.forChild([{ path: "", component: MyComponent }])],
+ schemas: [NO_ERRORS_SCHEMA]
+})
+export class FederatedModule {}
+Since we’ll need to download all the JS files anyway, for testing purposes, I’ve made it all compile to a single chunk and discard the non-remote entrypoint. To do this I used the default NativeScript webpack config and augmented with a few details to build it directly to my current app’s assets directory.
+const webpack = require("@nativescript/webpack");
+const coreWebpack = require('webpack');
+const path = require(`path`);
+const NoEmitPlugin = require('no-emit-webpack-plugin');
+
+module.exports = (env) => {
+ webpack.init(env);
+
+ const packageJson = require('./package.json');
+
+ // Learn how to customize:
+ // <https://docs.nativescript.org/webpack>
+
+ webpack.chainWebpack((config, env) => {
+ config.entryPoints.clear();
+ config.resolve.alias.set('~', path.join(__dirname, 'federated-src'));
+ config.resolve.alias.set('@', path.join(__dirname, 'federated-src'));
+ config.plugins.delete('CopyWebpackPlugin');
+ config.output.path(path.join(__dirname, 'src', 'assets'));
+ config.optimization.runtimeChunk(true);
+ config.module.delete('bundle');
+ config.plugin('NoEmitPlugin').use(NoEmitPlugin, ['dummy.js']);
+ config.plugin('MaxChunks').use(coreWebpack.optimize.LimitChunkCountPlugin, [{ maxChunks: 1 }]);
+ config.plugin('WebpackModuleFederationPlugin').use(coreWebpack.container.ModuleFederationPlugin, [{
+ name: 'federated',
+ exposes: {
+ './federated.module': './federated-src/federated.module.ts'
+ },
+ library: {
+ type: 'commonjs'
+ },
+ shared: {
+ '@nativescript/core': { eager: true, singleton: true, requiredVersion: "*", import: false },
+ '@nativescript/angular': { eager: true, singleton: true, requiredVersion: "*", import: false },
+ '@angular/core': { eager: true, singleton: true, requiredVersion: "*", import: false },
+ '@angular/router': { eager: true, singleton: true, requiredVersion: "*", import: false }, }
+ }]);
+ });
+
+ const config = webpack.resolveConfig();
+ config.entry = { 'dummy': './federated-src/federated.module.ts' };
+ return config;
+};
+One of the tricky parts of this whole process is that we can’t download the app piece by piece, as underneath we’re using commonjs (node’s require) to evaluate and load the modules into memory. To do this we need to download all of the output into the application and then we can load it. +As a POC, we can start with a simple remote configuration which allows us to load the entrypoint as a normal module.
+// federated webpack config
+{
+ name: 'federated',
+ exposes: {
+ './federated.module': './federated-src/federated.module.ts'
+ },
+ library: {
+ type: 'commonjs'
+ },
+}
+
+// host config
+
+{
+ remoteType: "commonjs",
+ remotes: {
+ "federated": "~/assets/federated.js"
+ }
+}
+And then import it as a route like:
+{
+ path: 'federated', loadChildren: () => import('federated/federated.module').then((m) => m.FederatedModule),
+}
+Unfortunately, we’d have to have all the federated modules shipped in the final application, so to load things dynamically, we should instead use the following code to load arbitrary entrypoints:
+/// <reference path="../../node_modules/webpack/module.d.ts" />
+
+type Factory = () => any;
+type ShareScope = typeof __webpack_share_scopes__[string];
+
+interface Container {
+ init(shareScope: ShareScope): void;
+
+ get(module: string): Factory;
+}
+
+export enum FileType {
+ Component = "Component",
+ Module = "Module",
+ Css = "CSS",
+ Html = "Html",
+}
+
+export interface LoadRemoteFileOptions {
+ // actual file being imported
+ remoteEntry: string;
+ // used as a "key" to store the file in the cache
+ remoteName: string;
+ // what file to import
+ // must match the "exposes" property of the federated bundle
+ // Example:
+ // exposes: {'.': './file.ts', './otherFile': './some/path/otherFile.ts'}
+ // calling this function with '.' will import './file.ts'
+ // calling this function with './otherFile' will import './some/path/otherFile.ts'
+ exposedFile: string;
+ // mostly unused for the moment, just use Module
+ // can be used in the future to change how to load specific files
+ exposeFileType: FileType;
+}
+
+export class MfeUtil {
+ // holds list of loaded script
+ private fileMap: Record<string, boolean> = {};
+ private moduleMap: Record<string, Container> = {};
+
+ findExposedModule = async <T>(
+ uniqueName: string,
+ exposedFile: string
+ ): Promise<T | undefined> => {
+ let Module: T | undefined;
+ // Initializes the shared scope. Fills it with known provided modules from this build and all remotes
+ await __webpack_init_sharing__("default");
+ const container = this.moduleMap[uniqueName];
+ // Initialize the container, it may provide shared modules
+ await container.init(__webpack_share_scopes__.default);
+ const factory = await container.get(exposedFile);
+ Module = factory();
+ return Module;
+ };
+
+ public loadRootFromFile(filePath: string) {
+ return this.loadRemoteFile({
+ exposedFile: ".",
+ exposeFileType: FileType.Module,
+ remoteEntry: filePath,
+ remoteName: filePath,
+ });
+ }
+
+ public loadRemoteFile = async (
+ loadRemoteModuleOptions: LoadRemoteFileOptions
+ ): Promise<any> => {
+ await this.loadRemoteEntry(
+ loadRemoteModuleOptions.remoteEntry,
+ loadRemoteModuleOptions.remoteName
+ );
+ return await this.findExposedModule<any>(
+ loadRemoteModuleOptions.remoteName,
+ loadRemoteModuleOptions.exposedFile
+ );
+ };
+
+ private loadRemoteEntry = async (
+ remoteEntry: string,
+ uniqueName?: string
+ ): Promise<void> => {
+ return new Promise<void>((resolve, reject) => {
+ if (this.fileMap[remoteEntry]) {
+ resolve();
+ return;
+ }
+
+ this.fileMap[remoteEntry] = true;
+
+ const required = __non_webpack_require__(remoteEntry);
+ this.moduleMap[uniqueName] = required as Container;
+ resolve();
+ return;
+ });
+ };
+}
+
+export const moduleFederationImporter = new MfeUtil();
+This code is able to load any .js file on the device, so it can be used in conjunction with a download strategy to download the files and then load them dynamically. For example, we can first download the full file, and then load it:
+{
+ path: "federated",
+ loadChildren: async () => {
+ const file = await Http.getFile('http://127.0.0.1:3000/federated.js');
+
+ return (await moduleFederationImporter
+ .loadRemoteFile({
+ exposedFile: "./federated.module",
+ exposeFileType: FileType.Module,
+ remoteEntry: file.path,
+ remoteName: "federated",
+ })).FederatedModule;
+ },
+},
+Alternatively, we could also download it as a zip and extract it, or you could, theoretically, override the way that webpack loads the chunks in the federated module to download them piece by piece as needed. +Sharing the common modules +The complexity of sharing modules cannot be overstated. The initial Webpack Module Federation PR that provided the full container and consumer API is smaller than the PR that introduced versioned shared dependencies. +A native app is not just a webpage, but the full browser itself. While the web provides a lot of APIs directly, NativeScript provides a lot of them through the @nativescript/core package, so that’s one dependency that has to be a singleton and we can’t under any circumstance have multiple versions of it. In this example, we’re also using Angular, so let’s share that as well:
+shared: {
+ '@nativescript/core': { eager: true, singleton: true, requiredVersion: "*" },
+ '@nativescript/angular': { eager: true, singleton: true, requiredVersion: "*" },
+ '@angular/core': { eager: true, singleton: true, requiredVersion: "*" },
+ '@angular/router': { eager: true, singleton: true, requiredVersion: "*" },
+}
+Here we also share them as eager, since those packages are critical to the bootstrap of the application. For example, @nativescript/core is responsible for calling UIApplicationMain on iOS, so if you fail to call it, the app will instantly close.
+First, we create a simple standalone component that will show a Label and a nested page which will be loaded asynchronously:
+import { Component, NO_ERRORS_SCHEMA } from "@angular/core";
+import {
+ NativeScriptCommonModule,
+ NativeScriptRouterModule,
+} from "@nativescript/angular";
+
+@Component({
+ standalone: true,
+ template: `<StackLayout>
+ <Label>Hello from standalone component</Label>
+ <GridLayout><page-router-outlet></page-router-outlet></GridLayout>
+ </StackLayout>`,
+ schemas: [NO_ERRORS_SCHEMA],
+ imports: [NativeScriptCommonModule, NativeScriptRouterModule],
+})
+export class ShellComponent {}
+Then we can define the Federated Module:
+@Component({
+ template: `<Label>Hello from wmf! Here's a counter: {{ timer | async }}</Label>`,
+})
+export class MyComponent {
+ timer = timer(0, 1000);
+}
+
+@NgModule({
+ declarations: [MyComponent],
+ imports: [NativeScriptCommonModule, RouterModule.forChild([{ path: "", component: MyComponent }])],
+ schemas: [NO_ERRORS_SCHEMA]
+})
+export class FederatedModule {}
+And finally, we can setup the routing:
+import { NgModule } from "@angular/core";
+import { Routes } from "@angular/router";
+import { NativeScriptRouterModule } from "@nativescript/angular";
+import { FileType, moduleFederationImporter } from "./mfe.utils";
+import { Http } from "@nativescript/core";
+import { ShellComponent } from "./shell.component";
+
+const routes: Routes = [
+ { path: "", redirectTo: "/shell", pathMatch: "full" },
+ {
+ path: "shell",
+ component: ShellComponent,
+ loadChildren: async () => {
+ const file = await Http.getFile("http://127.0.0.1:3000/federated.js");
+
+ return (
+ await moduleFederationImporter.loadRemoteFile({
+ exposedFile: "./federated.module",
+ exposeFileType: FileType.Module,
+ remoteEntry: file.path,
+ remoteName: "federated",
+ })
+ ).FederatedModule;
+ },
+ },
+];
+
+@NgModule({
+ imports: [NativeScriptRouterModule.forRoot(routes), ShellComponent],
+ exports: [NativeScriptRouterModule],
+})
+export class AppRoutingModule {}
+Which results in the following screen, fully working module federation in NativeScript!
+Although Module Federation is still limited on the native application side, we’re already exploring possibilities on how to import modules from the web directly, instead of having to download them manually, giving it first-class support and allowing fully code-split remote modules:
+const entry = await import('https://example.com/remoteEntry.js');
+entry.get(...)
+// entry magically fetches https://example.com/chunk.0.js if needed
+Module Federation is very promising for creating distribution of efforts and on demand releases without having to go through the pain of constant app store approval processes. While not for everyone it is a very exciting opportunity for large teams.
+Valor Software is an official partner of both the NativeScript organization and the Module Federation organization. If you’re looking at using Module Federation with your NativeScript application and would like some help, reach out to our team: sales@valor-software.com
+Viktoriia (Vika) Lurie is the product owner for Module Federation and works for Valor Software. Zackary Jackson is the creator of webpack module federation and principal engineer at Lululemon.
+So, let’s start our conversation and talk about magic comments and how it’s significant that Rspack added that.
+Yeah. So magic comments are pretty much just a way to decorate what webpack should do — to provide hints about what’s going on with a certain import. You can get around magic comments and do that stuff via webpack rules, but sometimes that’s a lot trickier to do in a case-by-case scenario than something like a magic comment. There are a couple; for example, we can tell it what the chunk should be called.
+So we do have dynamic import, we can define its name that it should be generated as we can tell it to, you know, do like a recursive imports, like import this whole folder, import anything and chunk it all out. But we could say, but don’t bundle or chunk any .json files. So and that cannot be done through changing the webpack context through the magic comments. And then the other ones are like webpack, ignore, that’s probably the one that I’ve used the most for, I want to tell webpack, skip messing with this import, leave it as a vanilla ESM dynamic import, and the environment itself will handle it. So does quite a few like bespoke webpack’y things. But the challenges that we’ve had originally with looking at this for SWC; SWC doesn’t have any thing to parse comments, it wasn’t considered a valid part of the AST, but ESTree, which is what Babel and webpack are based off with acorn. They use comments as like metadata markers or additional things to perform on something.
+So anyway, it looks like they landed comment support for SWC. And now that will unlock the whole magic comment thing, because originally that was the one limits. It’s like well, we could implement it, but we wouldn’t be able to read any comments and perform anything accordingly. Which isn’t a huge deal. But it is really nice to have lots of webpack users implement the magic comments to make webpack do more complex things on a chunk by chunk or import by import basis. So having that means there’ll be a lot of feature parity with how certain things work where. Like they depend on webpack importing the mega nav as you know, Mega nav.js, not just 381.js or whatever name that it’s going to come up with. So preserving those kind of capabilities. In the parser itself it is a really big bonus. Not having to write everything is regular expressions or rules or stuff like that up front in build. But being able to do this on the fly, it lets us do some interesting things like we can create a loader that creates imports with magic comments.
+So you can get into like meta programming. Because now I can say okay, based on what you’re doing, we’re gonna say, here’s a fake file that does these importing things and change how you import it. But we can do that as we’re printing out the require statements, not like I have to go and reconfigure Rspack or do something like that. So it offers that really nice capability of generative adjustments to how the applications built while the application is in the middle of building. That’s not a wide use case. But when you need it, you really need it. And I think another cool one that we do with magic comments is this one called provide exports. And let’s say you dynamic import icons. And it’s going to make, you know, 5000 file chunk, because you have 5000 icons.
+So usually I would go like import icon slash, wash with care. And then I just download the one icon. But if we get off the index file, I’m going to get everything and if I’m doing a dynamic import, that means I’m going to split that file off and get what I need out of it. But webpack is going to know, what do I need out of it after I’ve split it off. But with a magic comment, this is similar to our reverse tree shaking ideas. Is it a magic comment? Then you can pass a thing called provided exports or used exports. And you can actually tell it, hey, I’m importing icons, but I’m only using three icons. So when you split this thing, tree shake everything else out of this dynamic import except for these four exports or so that I use. And so that’s really powerful to create code splittable tree shakable code in really advanced scenarios where you’re trying to lazy load something that’s usually a big library but you’re only want one thing out of it. And so the magic comments for provided exports, or used exports is super handy.
+That sounds really interesting and cool. But let’s talk about delegate modules. So, the post we did about delegate modules, it really got a lot of comments and a lot of interest. Can you please share on how you’ve updated your reference architecture?
+Yeah, so I still have delegate modules filed as a beta capability for the Next.js federation plugin. Mostly, I’ve just left it in there, because our next implementation is the most advanced we’ve got. And so anything new that we want to do, we can do it on the Next one, quite easily, since we’ve created kind of our own monster inside of Next that lets us do anything that we want. And it’s a really nice platform for reverse engineering Next.js, at this point.
+So delegate modules fit in really easily there, because we already had a big, intelligent plugin on top of it to make the Next play nicely. The idea with delegates is going to be to extract this logic out of it, and move it into one of my other universal packages that’s not directly tied to the Next repo — like I might put some of this in the Node package, or might put it in the utils package or something like that. And that’ll give it to everybody. But yeah, the progress so far has been pretty good. We’ve been able to find a couple of bugs along the way, and how to implement these things just the right way everywhere. But if you see some of the examples starting with the word delegate in my example folder, then that is a delegate module example.
+So we’ve got a Next delegate and we have just vanilla webpack delegate, which was like the first one that we did, just like test the theory. And then it worked. And so it was like, okay, cool. We’ll make one more example, with the delegate module, we’re using Medusa and all the vanilla ways. So what delegates has kind of given us so far, and in my examples, like, I think the best one is going to be the Medusa plugin that we use in Federation. So as of yesterday, I have a Next.js app deployed to Vercel. And it’s, you know, for federated Next applications. One of them is a shell, and the rest are components or other pages. And now, with those delegate modules, I can go to any pull requests that I have open on Vercel. So a different branch, where it’s just it, my delegate module is implemented here, and maybe I forked the branch and I made like a blue version of the header, or something like that to kind of test it out. And then let’s say I fork it again, and I make a red version of the header. Now I can go to either original Red or Blue versions of the header. And on either of them, I can go back into Medusa and I can change the version and I can make the blue one be red, or just be nothing and all the other pull requests, I can essentially change them as well. So my open pull requests don’t actually mean anything anymore. It’s just like a domain that I can go and hit. But effectively, all three pull requests. If I change in Medusa, all three pull requests are going to show me the exact same code change, because I’m able to link it and say well use the header from pull request number two, even though you’re currently green, use the one from red and pull it back in and do that on the server or in the browser. So seeing that be manageable was a really really big thing to see because we’ve mostly only seen Medusa managing things up to the browser. So seeing this now actually go all the way into the server. 
The server is now responding, asking Medusa what to do, and Medusa is telling it what to do, and then it comes back to life in the browser. Without any hydration errors or warnings or anything like that, it is really, really impressive. And then on top of it, the delegate also has this concept of an override protocol.
+So this is very similar to what we wanted to do with Medusa and adding like a Chrome extension, so that I can say, well, this is what Medusa is configured to. But when I go to production, I want to see blue header just for me, nobody else. So we kind of just implemented the poor man’s version of it, where I wrote something that we pick up right at the beginning, and then I process it, and then I update how webpack does something and I call hot reload, and then it pushes me through with to the updated site. And so that is, let me now have Medusa managing everything and then right above Medusa, I have if overrides exist in the buffer, read the override, find the current remote entry, that override is forced. So if it’s like overrides home, and then here’s like, the version and the hash or whatever, then just from a query string, my browser and will change the blue nav back to red or to green. And then if I delete the core string and reload it again, I’m getting the red now. And I’m able to do that across any of the pull requests. Again, we’re now have Medusa and I have a way to override before this thing asked Medusa, it’ll ask some system that I have on top of it, and then I’ll go okay, well, you’re not doing anything with it. We’ll go to Medusa for the main config. And this is really powerful about delegate modules, because we can keep adding layers above or below it. Medusa can just be one of the calls. And you know, I was speaking to somebody about security and compliance. And they were saying, well, if Medusa got hacked, couldn’t somebody do a lot damage, like changing your script URLs? And then it’s pointing to your source of truth? And I was like, well, yes, but we’ve got security layers, several security layers kind of baked in here. But we can also set policies inside the delegate module actually. So we could say, when you asked Medusa for something, check the domain Medusa gave back, is the domain somewhere registered inside of your infrastructure? 
Is the URL part of your company, like there’s no rogue URL coming from some other location? We could have the delegate module kind of be a safety check and read what Medusa is about to give to webpack and validate if that should be allowed or no, that shouldn’t be allowed. And if it’s not allowed, we could always just short-circuit it again and say, okay, well now fall back to just the stable release, like maybe we have a bucket, like the stable channel that we hard code. And so we know whatever stable release we put up is lululemon.com/you know, remote slash stable slash remote entry.js. And so now I have three mechanisms available to me, I can override it on the fly, I can ask Medusa for it, I can verify what Medusa is doing if I need to do any additional checks. And then lastly, I can also just retrieve, what should I do if both scenarios don’t match the requirements, and I can have a third fallback on how to go and do something in there.
+But it’s three completely different mechanisms on how to acquire the connection interface to the two different webpack containers. So it’s just offers a ton of power. Like, I think the way that I would see delegate modules is with it, we could probably create our own metal framework around Module Federation. That’s how much power it gives you because it’s got middleware in there, if we want to do something, say like Next.js, where every page loads data, and you know, it does that whole thing, we could probably wire a lot of that stuff through the delegate module, if it needs to load data, then we could attach that on. So just what webpack gets is an interface specific to any kind of side effect that we want to analyze or understand or respond to. So if we know hey, the page coming in, it is going to be this type of data fetching page, we could wrap the delegate module to return that kind of construct to for fetching data, like if it was get server props, which is something special in Next. So it’s really nice that we have that level of control. It makes me feel a lot like delegate modules is just like Express middleware inside of webpack’s require function where you know in between asking for something and getting it back, you can do whatever you want with it. And then finally, you feed it to webpack so it’s a ton of control compared to anything we’ve had before.
+Yeah, this one sounds really powerful.
+This is probably the biggest technical unlock since Federation was created. Of all the features it’s got, this is probably the most powerful one made available, which is why I’m so excited about it.
+Could you also use something like circuit breakers with delegates module switching a federated remote based on error percentage, or latency?
+This was something I was speaking. And it’s also kind of where I think Medusa could be useful. Because when we’re speaking about a lot of these type of capabilities, the one area that always kind of gets blocked is who ingests the information to respond to it, so I can have a performance monitor. And that’s great. And I can either make it trigger something in my CI or do anything like that. But you get to the spate of the problem I find with it is whenever you do things in CI, it’s very dumb. Like CI doesn’t know much. We’ve made efforts to do things like static analysis for security, or linting, or other kinds of tools like that. But CI, effectively doesn’t understand what’s happening, it’s just going to do it, do a job. And as long as it doesn’t break, doing that job, that’s kind of all it knows about. Performance monitoring, on the other hand, might know a little bit more in depth of here’s the area or here’s where it’s tagged to be slow. But it doesn’t actually know well, what to do with that. So if it can only send me a very small piece of information, like the header is slow. How do you translate that back into a big company with like, 1000 repos that are created and destroyed all the time? That oh, this map is still out? Or how do you maintain that link? So that you know what they’re talking about - here is actually this header over here. So with delegate modules that offers us this option to say, okay, well, we can retrieve some info to understand what our performance looks like and adjust it accordingly. But we need to know somebody needs to be the adjuster almost. So if we use something like Medusa, where we started sending back RUM information to Medusa, Medusa could see, hey, the header was just released, that slowed only the site’s down that are using this new pin version of headers.
+So now we’ve reduced the scope, it’s not something slowed the site down, it’s this release just happened. And everybody who took this really soon saw a similar increase in latency or performance. So now we already have a good understanding of what most likely caused it. And then we’ve also got a good understanding of what’s the impact radius of this. So now I could start reporting say, hey, the navigation is have a performance problem. And it’s currently impacting these four applications here. If it’s a critical problem, where you could create rules to say, you know, like a threshold for an alert, if it becomes X percent slower, we could say, okay, Medusa sees a big change in it, pin it down to the previous version, and see if maybe do that on A/B test. So set a cookie or something to track and switch this user back with a different identifier to the mitigated mode and makes a 10% of traffic, get that mitigation response. Are we seeing mitigation mode, improved performance, and there’s no error increases? If yes, we could then say, okay, push that to all delegate modules, and now we’ve rolled back the site, but we’re able to programmatically do it and almost validate what Medusa thinks it is. Like, you know, it’s a self fulfilling validation, you know, we’re sending it wrong data and well, let me tweak this, what did that do? Okay, everything went well, let me roll it up. Oh, if we rolled it up long, we suddenly see a problem, okay, undo that option. And it’s back to whatever. But either way through delegates, it gives us this these capabilities, where we can now dynamically change how things are done. In the browser, it could be say rolling things back or rolling things forward. On the server side, I think it’s a little more interesting, because if we say look at edge workers with Netlify, and Module Federation, we could then measure what’s cheaper. 
Is it cheaper and faster to send a request to another edge worker to print out headers, HTML, and then have Webpack get to have a federated import of header? But we have a delegate module that changes it to not download code, but instead, fetch the HTML and then return it as like module exports a string.
+So now I’m importing a string, that’s actually the reply from another edge worker. And that becomes the stuff that that other edge worker did all work to make my header. But if that’s slow, like if it takes, 50 milliseconds to connect to the header, and we’re saying, well, header only takes two milliseconds to render, the system could self optimize and say, well, we’ve seen that it’s actually faster if we just pull the runtime down and run it on this one worker. So we’ll do that unless a partner comes under heavy strain. And then you could say, well, in the next invocation, push it back out to another worker. And now we can kind of have an elastic computing system where it can become a distributed parallel computing system, or it can fall back into more monolithic in memory based patterns. But you know, that’s something you’d have to usually build a whole big framework around. And you’d have to deploy your application specifically for the limits of workers and stuff like that. With Federation and the Node Federation on Netlify, you can kind of just deploy an app, like in whatever shape that you want. And it will work. So I can deploy this thing to no JS. And I could then say, okay, well, let me push this up to the Edge, and it would work just fine. I don’t actually change how I wrote any code, it’ll just know it’s in the Edge network, and how certain things need to be done are a little different. But I didn’t have to design and develop an Edge worker application, I just built the app and let the building tool take care of making sure it runs wherever it’s supposed to run. So it gives a ton of flexibility there, even for things like imagine. Edge workers really good, but it’s lightweight. So if you have a really heavy task that needs to be done, sometimes it’s better to send that back to the Node Lamda.
+So this gives us this kind of three dimensional scaling, where we can either scale, you know, horizontally across more workers or, you know, contract down to fewer workers. Or we can also push the computing between Node.js and the Edge on the fly. So now you could have your slow note server does a cold start does the one complex job that it needs to do and then there’s another 10 things that could do. And it could say, well, those things have been light in the past, let’s send them out to 10 separate workers and process them all in one go. Instead of sequentially, do one, do two, do three, do four, and then send it back. But yeah, so that’s like one of like, the more out there, possibilities, but it’s definitely something that the design of this delegate system allows for things like that stuff that you previously, that’s just not possible to make stuff like that work, especially on like an Edge layer. But for us, it would just be, one NPM package wrapper, like a special delegate module called like, the elastic compute delegate, or whatever. And then that thing’s designed know, okay, I can go here, I can go there, I can go wherever. And then how you use this component is similar to normal, like Module Federation patterns that we would want, like how server components would be, you don’t make it, you don’t send it a bunch of data, you don’t pass it context, it’s more will serialize, a little bit of data, send it over somewhere else, it will do the work. And the little data that I send it is enough for it to understand what it’s supposed to do. But it does its own heavy lifting, fetches its own data, and returns everything back, which is the component level ownership model.
+So if you’re following that already to make distributed systems more reliable. That also means there’s a high chance you could start splitting it across different compute primitives as needed, and actually scale up and down your workload, because it would follow someone to kind of construct and now we’re providing that glue code to let something like this happen, which would be very hard to manually do in a like, time friendly way.
+Thanks for sharing! All right, so now - demo. Speaking of making distributed systems more reliable, we haven’t shared about this for a while. Let’s do a quick demo of the new reference architecture, and its configuration with delegate modules.
+Sure. So, heads up, I can’t show my reference architecture right now, but I have a simpler app that’s still working and letting me click around, so I can go through, you know, each important page — nothing super fancy, but it shows all the parts that we want working.
+It works perfectly.
+So Medusa has undergone several drastic iterations of improvement. A lot of really good work has been done around the UX and the design of it. I think when this originally started, it was a very simple project. It worked, but it was more like, here’s a concept proven, achievable. Not, you could run a real application off of it. And it’d be quick. So, since those early days with the help of Valor team, this thing has really exploded into a nice really first class product. So one of the big things that I just saw is we’ve gotten this new UML diagram. My old UML diagram was pretty flaky. But it mostly did the job. But this thing is a lot more well laid out, and offers quite a few room like just more room for improvement if we need to continue increasing the amount of data that you can see in the UML view. So you get better views, and better interconnects. Like, it’s easier to see who connects to what and things like that. And into the future, we will see a lot more feature capability to be able to come out of the UI that we’ve laid down here, which I think is the big one is how do we build up a UI that’s going to allow us to move forward without redesigning it like five times over? Oh, you know, what’s the most complicated use cases, cool, those are far away. Now, let’s just make things better. And it trends in this way, that gives us you more power over time.
+So wherever UML is in here, we have our dependency table that still shows shop is vending pdp shop and a page map checkout is, you know, title, the checkout page or map. And then home is, navigation, the homepage, and it’s page map. And then in here, we also see like who is vending modules, so we can see everybody who shares so we’re seeing all of these all offer, this package is shared. So it gives you a nice idea on what’s available and what’s required in various parts of the application.
+We’ve also got our Node Graph here, which has come a long way as well. A lot more readable. And I love the sizing, that it’s scoped to the size, so you better understand how big a remote is, or how many connections are made to a remote over certain other ones. But you know, if I want to see who uses this title component from home, I can click on and I see okay, checkout depends on title. And I could see well, who uses shop, okay, shop is consuming shop as well. And it’s also used in home and checkout. And you know, we could go and look at the product page and say, okay, product page is used by shop. But look at shop, we can see shop is connected to nav page map product page, it’s connected several parts of the application here. And then you can also go in here and like choose the Node that you’re that you’re trying to find if you’re trying to search a specific note up. So it’s also a lot easier to navigate as the systems get much larger. And we can also look at the depth of the nodes, which is a really nice feature to be able to see like how deep down do we get? Or how many or few nodes do we want to display? Magic, you had like 1000 nodes in here, being able to filter the depth, those down would be useful, especially as nested remotes and things like that come along. And we can also filter these things out by direct connections, not 100% sure if those are all wired up yet. Oh, yeah, direction of connections. So then if I have that on there, I can see which direction it’s going. It’s a little hard to see. But you can see I have these arrows on here. So now I know who’s consuming it. And who’s providing it which thing is a big, useful thing to know is like, well, not that these two are connected somehow. But like, who is it? Who is it that I need to go and spend? Like if I’m going to change nav, who do I need to go and update? Okay, so I need to go and look at checkout shop and home because nav is going to be impacting these three.
+And then we get into our Dependency graph, which is like our old Chord graph that we still have, which is just another way to visualize what’s going on to see what are all the interconnects overall, and how everything kind of spreads across and connects to our other dependencies.
+So once we’ve kind of gone through these applications, if we go back to their UML, I could flick into home and go to the remote. And now I’ve got you know, what are the modules exposed? And where are they on the file system of this repo? If it’s requiring anything additional, like anything that’s a shared module or something like that, it usually will get listed here. I don’t get React or some of the default Next.js things listed because those are considered hardwired to next so we just mark them as external so the remotes don’t even worry about negotiating React, because in order to live inside of Next, React has to be there. So we don’t really track those kinds of things in here. But if I were to add Lodash, it would pop up and say, hey, you know, this thing requires Lodash, because that’s the one like shared vendor or outside package that it’s dependent on. We’ve also got, you know, everything that is shared and network versions that shared out. So it makes it easy to understand who’s on what. And we’ve got the direct dependencies. So this is everything that’s listed in your package json, and as well as we can see who consumes it so I can see cool this thing consumed shop, checkout and title. And then up here, of course, I’ve got my version manager. So I can go in here. And I can choose between, you know, I have a timestamp, I also have the git commit hash, you could have a pull request number, or you could calculate like a semantic version, like you would for an npm package. And those can be listed here as like what you’re pinning to. And so then the other thing, as well as we also have, like the version comparison, so you can see over time, how has this container changed, like if we upgrade React, I can see the date that happened. If I change what I’m sharing or consuming, I can see the date that a new shared module was added, or it started importing a new federated module. So I mean, even here, you can see I’m using 2.8.3. 
and then I’m now using 2.8, beta, beta two. And then over here, and you know, this dependency was 6.1. and now it’s 6.2. So it’s very useful to see like, well, when did change occur on your dependency tree? And in distributed systems, or even in a single repo, this is really complex to find out when this type of information happens. If I see a bug start occurring in production, well, what happened? Okay, release one out, well, did we only find it now? Or did it actually happen on that release? So you have to dig through the Git history and try and understand what might have happened. But with a view like this would make it a lot more digestible to go in here and see, okay, something’s wrong, what recently changed in our supply chain. Okay, somebody updated some cookie utility, right around this time. And imagine if this view take this feature, send to Medusa, imagine if this view had a API connection to Datadog or to Sentry. So you can start to see under every release that gets cut, here are the tags and error types that are coming along, or here are new errors that were never seen before, only when this release showed up. And it helps you to be able to start correlating information. And again, with a lot of tools like Datadog, they aggregate so much about what’s going on. But none of these tools natively understand how the application was built, and how it’s supposed to behave. Really only webpack has a deep understanding of that. So when you start taking these tools that don’t know much, and you apply them to basically an ingest engine that understands the webpack part very well, we can start to draw conclusions about, hey, this is likely this thing. So it just adds a lot of new type of options.
+That’s been very hard to tame or control, even in a single repo frontend. It’s still hard to manage who’s using what and where, even with npm packages — well, who’s still using version one of the carousel, because we want to remove it from the component library? Okay, now I have to do a search across 1000 repos and hope that GitHub search is good enough. But if everybody was reporting to Medusa, I could just go into Medusa and say, okay, who uses this package everywhere? Cool, here’s the exact file and line that the import is on. And you instantly can know this huge amount of information about your supply chain. So there’s two other really big things that have come along recently is we’ve gotten organizations. So now if you’re a company, you can register with an org. And you can provide other roles and permissions to your users under that org and you can start to manage and scope it. So certain users might only be able to have read access to it. And maybe you want to have only your AWS keys or something like that have the write tokens or anything like that to edit or change. You’ve now got like a policy in there. So it’s not just the trust scenario, and you’d be able to scope out certain apps. So hey, you know, the retail group doesn’t need to see the North American Webapp group.
+So we could have a Lululemon organization, but we could have two separate groups under there that each see everything about what they’re doing, but there’s no interconnect, so nothing has to be implemented. And another idea is that by putting the ability to put policies around the apps I think was something we’ve thought about doing possibly in the future. So then you would hold them it’s like role based access permissions. So now, you know, if I’m in a bank, and I have accounting, trying to pull in a federated module that’s usually on like the public frontend site, you could put security measures in place to say you’re not approved to be able to consume that remote from this post, like, there’s not allowed to be crosstalk here. So that provides a layer of governance on top of something that’s very hard to govern, because I can just go and drop a script in anywhere or add a cookie. And once it’s there, it’s very hard, you have to do something like, you know, what is it content security policy, that still only works like the domain level, so you either have to build infrastructure to block it behind a reverse proxy. Meanwhile, a lot of it if the glue code was driven by something like Medusa, all that rules could be applied right to the webpack runtime, and it will be much harder to circumvent webpack and reconnect something you’re not supposed to, because Medusa is kind of driving the whole graph, and everybody’s using this thing. So, you know, it adds a good layer of security, a good layer of separation and multi tenant users. So it’s just a lot. You know, it’s a big feature of always wanted in here, to go after enterprise customers, where you often can’t just do a single login, they’re gonna want it behind their SSO, and they’re gonna want a org based thing to revoke and grant access to users as they, you know, come and go.
+I would say, I think what you’re describing there, to kind of dive in some details, I think part of it is, as a developer, creating a federated remote, you can specify this is just for EMEA, APAC, or internal, this is just for external. And then being able to do that other people can find stuff that’s targeted for that. But when they do find something that’s targeted for that, that they can’t use, they at least understand why, and have a way to then reach out to those teams and communicate with them. Okay, this is labeled as internal only, I need it for my external application, can we either add it to be external?
+Or can we intake and basically open a ServiceNow ticket go through the intake saying I request this federated module from a different director, Umbrella. And now that now there’s a governance thing in place, you can’t just hot inject something or things like that, you still have that flexibility, but it’s, you’d have, like your team, and governance know what’s happening, which is really hard to do with npm packages, like you’d have to either way, it’s a really expensive problem to solve managing code and permissions of who can do what you have to set up your own custom npm registry, or what rules are other things that could be bypassed. And that still is like deploy based, but want to now approve this, I have to do some update to all the code bases, I can’t just go to a central engine, and say, yes, so and so authorized apps wants to access this in these environments with this token, and only this token, no other read token is allowed to access it.
+So that does provide a lot of real maturity and flexibility, given the wide landscape of how different companies and compliance kind of come together. So then I think the last one, which is really great is we’ve, for a long time, Medusa supported two environments, development and production, and there’s a kind of hard coded into its database. So that sounded good initially, because really call them either in dev mode, I’m in prod mode, but it gets a little trickier with, like staging servers, or things like that, where maybe I want to control the staging environment, or if I have, I have about 15 different environments, they’re all hooked up to different backends or versions of say, GraphQL API’s or something like that. So maybe they’re testing a feature against, say, stage or preview or QA environment, so and so. So might want to just say, okay, in Medusa, if you’re this environment, here’s your pin controls, here’s how you’re being managed that I can just say cool, bump stage to the latest now in QA, or bumped some other part of the application stack, not just either development or production. But now I can have multiple layers. And the idea is all the builds would feed into the database. And the build isn’t set told that it’s production or development in like a hard manner. When you’re sending the build to Medusa, you can say, yeah, this is intended for productions. And productions pin, the latest it’ll grab this incoming one. But I could also say, well, this is a stage PR, and it would just show up tag the stage, but I could still go into say production and I could see that release in there and I could say okay, use the one that’s on stage, and I could connect them, which is again really flexible to be able to add unlimited environments and change there’s lock files accordingly. It’s very nice. Like, you know, you could create almost like a code freeze environment. 
So it’s still production but you could just call a new one code freeze as soon as we hit code freeze, this is the environment that we’re going to be going for. It’s the frozen one, which we know is like solid and stable. And we can also set up another environment that’s like failsafe.
+So if in code freeze, something goes wrong, and we need to rollback, we could like battle test QA, a backup, you know, like, configuration of the application, if we need to do anything emergency, we could just go in one place and say, okay, production, you’re now going to read the, you know, backup, frozen backup environment. And now the next invocation that will listen to all of that, but I can go and swap those things out on the fly, and you know, reallocate what this environment is, or create another copy of that environment, apply changes to it, and make it point to, you know, a different, more robust config. Which is, which is really nice. Like, you know, imagine if we had personal environments. So what if I had, like Zack’s environment in here. And so then I had an override inside of my initial request. So if I go to production, and I’ve got the Zack’s use X environment tag, then production will do a one off response with paragraph configured the entire app. So I don’t have to go and tweak production to see what’s going on or override each remote individually. But I could just say, hey, use my personal environment, execute my federation, kind of schema against some Lambda somewhere that’s managed by Medusa. But that’s also very nice if you want to say have like a personalized thing, like I’m working on four different teams on implementing the same feature. And we’re all in separate repos, where we could create, you know, JIRA ticket environment. And now, locally in stage, wherever you’re going to have every contributing party’s code pulled together just for this features that you can all look at it and work on it easily. And you’re all just pointing to an environment that you can then remove later. 
But you know, it gives you a ton of flexibility to do things like that just reworking or, you know, or other things where I could say, hey, okay, I could use Zack’s environment as the connection, and I can open a local tunnel on my machine so you’re actually getting your remote is actually my computer’s local build serving to you over a tunnel.
+Now, if they’re connected to my environment, I’m kind of acting as their remote and I can edit things while they’re working on my feature, but we can work remotely in tandem. With our changes being pushed and pulled without Git, just every time they press Save, I see the change show up when I refresh my page, I don’t have to git pull or do anything. So that’s also a really powerful potential impact for stuff like this could help change how we work and collaborate, especially in distributed systems, or in monolithic systems, where there’s usually many moving parts that need to come together. But it gets very hard to scaffold, how do those moving parts come together just right for developing whatever the use case is without creating a ton of infrastructure and kind of manual work, to recreate 10 services over here, just so that we can customize them? When all I really want is to link to 10 different folders than usual. I don’t actually need 10 servers available and so on to do that. But traditionally, that’s how we’d have to do it with ephemeral environments.
+And talking about multiple and unlimited environments, how much do you think Medusa and Module Federation can help to save on the deployment infrastructure?
+So this has been a big one for me. I’ve personally been on this. Maybe I’m right, maybe I’m wrong kind of tangent. But I think also, if you’ve ever read anything from Tyson, he worked with me on Aegis and Node Federation, actually. But Tyson had a really good viewpoint when he started working with federated backends. And he kind of put it as when you have something like Medusa and Module Federation together, the concept of CI starts to lose meaning like there isn’t really CI anymore. It’s just continuous delivery. And, you know, most of the whole build and deploy infrastructure is kind of eradicated under a system like this, because the whole reason these things get so complicated is because it’s all based around uploading a Zip file with everything it needs to this machine. And if you need something else, you have to give it a new Zip file. And so that means you need lots of unique Lambdas so on and so forth, because they can only do one job at a time. But if we decouple the file system from the compute primitive, which is what federation does, you know, in theory, a really large company could have all of their QA, all of their lower environments could just be one Lambda called stage. And every time you hit stage, it becomes a different codebase on the fly just for you, and responds accordingly. I don’t need ephemeral environments or anything, because stage doesn’t have a file system that it’s coupled to, it’s pretty much it’s kind of a way I think of it as like, imagine, if you have all on GitHub as a Symlink folder.
+So then, all I’m doing is saying, okay, for this run, change what this folder links to and go require the same thing. And that’s kind of what webpack and Federation is giving us is that ability to say, well, the file system’s anything, and we can change it whenever. And if that’s true, we don’t need hundreds of Lambdas and ephemeral environments, and you know, a big deploy system to manage, because fundamentally, there’s just not that much that they need to do, like, I don’t need an ephemeral environment, because the only reason I have one is because I need a different Zip file. So you know, you could just have two Lambdas, stage and production, and that would probably handle all of your development requirements for a team of 500. And it’s just two Lambdas. So that simplifies everything a ton in terms of maintenance, and offers companies things like the managed model. So similar to like, you know, how Vercel does managed hosting, you just connect the Git repo and you don’t have to think about much else. Federation offers you the way to kind of make your own managed service. So everybody wants, say, a Next.js SSR frontend. But what all they really want is they just want to create a page, they don’t actually want the whole Next app and to maintain it, and to have a Lambda and all the CI/CD, they just want the page and a little dev environment. And then once that leaves their computer, as long as it runs, that’s kind of what everybody wants.
+So these kind of avenues allow you to offer that where it’s like, hey, you basically are just create React app, upload some static assets and that’s the end of anything you do. And there’s just one or two servers in here that are actually real server Lambdas. And their only job is to do anything that Webpack tells them to do per execution. So if you have that kind of model, you don’t need so much infrastructure, you just eradicate it naturally, there’s just not a need for the problem that a lot of heavy, expensive infrastructure solves. Which is what I liked the most about it, because I’ve always been frustrated out. Why is it so much work, just just to upload some JavaScript. If we think before server rendering, and before no JS, we think like to WordPress, and jQuery, it was super simple. Like you change something in PHP, and you just drag it to the server and you refresh the page and it shows up right away, kind of like hot reloading. Soon as it’s up there, you have it. There was no concept of like a build or anything like that. So it’s real easy, you just FTP and the next invocation and whatever you’ve done to the PHP is updated. And then on the front end side, we had stuff like jQuery or whatever, where you could add a jQuery widget to the page. And, you know, I feel like we could make probably sites that couldn’t scale forever. But you could create a pretty robust experience quite quickly, just because of how easy these pieces are. There’s no builds and wasn’t anything complex was no build needed. It’s just a couple lines of js. And there we go. And I really liked that model, because it was so simple. Like, you know, it took a couple minutes to upload a frontend, because you know, it was just a folder inside of a PHP server. And it was just some jQuery widgets that are on a CDN. But we lost a lot of that when we moved over to built applications. 
So you know, where I kind of see all of this as, hey, it just brings us back to a simpler time, but allows us to keep using more advanced systems. But the kind of operational expense doesn’t have to continue to bloat as the technology becomes more complicated. So seeing things simplify, and you know, if I only have two or three Lambdas, I can now focus instead of on scaling Lambdas, and managing load balancers and Route 53, and all of the other network stuff that comes with it. I could probably focus most of that effort on something like multi region deployments.
+So instead of deploying everything to one or two availability zones, which gets tricky to do when you have 40, 50, 60 different code bases that need to all be deployed multi region, it’s just a lot of pieces to manage and a lot of network to repeat 60 times over. But imagine if we only have one or two Lambdas — deploying multi region is just changing the YAML, like, you know, the GitLab or the Terraform file in one codebase. And now I can deploy this application across, you know, 50 availability zones in the US. So I could scale it a whole lot faster, a whole lot more than what you usually could, because there’s not a big cost of change management anymore. It’s kind of managed. So you make the change, everybody gets it, you don’t have to ask anybody to go and do it. And they just want to build their page or their feature. That’s all they care about. And that’s exactly what they get: a stable place to build the page. But all the management pains is now in a centralized, more intelligent place. So it just makes life easier. Like I can’t imagine working on a non Federation powered system after working with one.
+Make sense. And would you still need two Lambdas if you’re using Netlify Edge?
+Possibly not. So I think when it comes down to the Edge, the only thing you’ve got to think about is what does your application use. So if you need to do something like use fs, which is the Node file system, like package accessor thing. If I need to use fs, that’s a Node only API and edge workers is just V8. So it’s just the JavaScript engine of Chrome, it’s not actually Node itself, it’s just the one JavaScript handler. So it doesn’t really know what a require is or things like that. So it depends on what you’re trying to do. Some cases, it might be, hey, I need Node to handle like, these three or four pieces of workload. But maybe 70% of the app is just you know, standard React components or something simple, cool, only use Node for what’s needed and automatically propagate anything possible out to the Edge. And if you see that the Edge networks are getting slow to reply, consolidate back onto the Node process.
+So now Node doesn’t have to wait on a network call to the Edge, it’s just in memory, and it can instantly do whatever it wants. But being able to flip back and forwards as needed capability by capability is also a really big deal to be able to have. If we say you have a more like agnostic application, like let’s say it’s not something like Next.js, which has like a lot of Node specific implementations, then, like if we use Remix, Remix is pretty agnostic from needing Node or running on Deno or so on. So with something like that, I would say with the Federation capabilities on Netlify, you don’t really need a Node, like an actual Node server. Unless you need one, and that makes sense. Like my default way of going would be similar to how I’m approaching Rspack. I’m going to start with Edge. And if the Edge hits its limit, and I need to do this one thing, then I can just switch over this part to Node, but I don’t have to re-implement my entire system now for Node. It could just be, okay, well, this won’t work for me any further over here. And I just drop it into a different spot. And I’m still good to go. But I can still move them back and forward in the future. It just keeps that interoperability there. So you can use the system best suited to cater for whatever need you want. Like let’s say we used Edge and we had Lambda for a couple of things. And let’s imagine we also had Docker. Now we have EC2’s persistent compute, that’s always online, always hot. We have Edge super close to the user, but not extremely, like, resource powerful. And we have Lambda, which is kind of like in between, it’s cheaper, but a little slower to start. But you know, it’s good for like, you know, burst loads.
+So now imagine if we have something say like a GraphQL endpoint, and we want to push GraphQL to the Edge. And we see actually, we’re not getting the level of caching or optimization that we want with GraphQL at the Edge, because there’s too many invocations on different CPUs, so it can’t build up an internal cache. So then you can say, okay, well, let’s rather run that back on the containers where they’re always hot. And they can have a big in memory cache of data and so through systems like this, you could just say, okay, we’ll send that over here to the Docker container. And now Docker become GraphQL. For me, and you know, all my rendering, let’s move that over to the Edge. And oh, well, this one little Lambda handler needs to do a couple things. It’s a bit memory heavy, but we’ll put that on Lambda for now. And then maybe if we optimize it in the future, we’ll send it back out to another edge. But imagine doing that with almost like a UI where you could just drag and drop bricks into a bucket like I want this remote to run here and that one to run there. And you don’t actually have to like think about the networking and the wiring but if it was something as simple as just you know, drag the square onto the type of machine you want it to run. There you go, or possibly a more upgraded one would be a, we try to automatically figure out the best place to run this. And we learn from every successful execution. And we can adjust the how things get computed based on how it’s working and find the most optimized path that gives you the most performance. And if something changes in infrastructure, the system could then immediately respond to that change, like an outage on us on AWS. We could say, okay, we’ll move on Lambda to Edge, it might not be perfect, but we’re just going to reallocate all the compute somewhere where we know to run while AWS is having failures, which is quite nice. And usually that has to be done through like multi cloud. 
It’s all infrastructure based to do that, usually uploading Zip files to several different places. But under this type of model, it’s more just well, here’s a zombie computer and tell it what to do.
+So now all you care about is, well, what’s the command that I’m going to tell it to take care of at this point in time.
+All right, thanks for sharing. This was really super interesting and helpful.
+Viktoriia (Vika) Lurie is the product owner for Module Federation and works for Valor Software. Zackary Jackson is the creator of webpack module federation and principal engineer at Lululemon. This interview is the first of hopefully many diving deeper into module federation to help the community better understand this rapidly growing and evolving technology.
+Hello Zack! Welcome, I’m glad that we got the opportunity to have this conversation. I’d love for us to start by talking about the upcoming release. You already shared some initial details on our community call. What would you like to add?
+Yeah, so module federation version 7 has a new main feature in beta and this is the use of delegate modules everywhere.
+Could you explain what delegate modules are?
+Delegate modules solve a challenge that has been in the module federation space since day one. This concept that everybody refers to as dynamic remotes. All the examples we currently have out around dynamic remotes are mostly about how you inject a script using the low level module federation API.
+When you do this you lose all the nice stuff that webpack has to offer, just so that you can programmatically inject a script. What I’ve found most engineers want is the ability to dynamically choose the right kind of “glue” code.
+What they are trying to achieve is something like when a user clicks on a button there is an import that is based on a config the developer provided that points to a remote application somewhere. I found that in most use cases, developers don’t want full dynamic remotes; they still want to be able to use “require” and “import from”. They really want to control the glue code part of when webpack goes to request a remote, how it gets that container, and what methods they can use to retrieve it.
+The older implementation of this was achieved with the “promise new promise” syntax. The idea was that I can put in a giant string that webpack will take verbatim. When I copy that in and it’ll do whatever that string says.
+The problem with that approach though was that it is not very scalable. It’s great if you need to grab something off the window or make one API call. If you’re trying to use a library, or you want to do something like hook LaunchDarkly up to control decisioning, you couldn’t because you cannot directly import anything, in this case it’s very brittle and restricting.
+Delegate modules allow us to just tell webpack that this remote entry is actually code inside the webpack build already. With delegate modules you can kind of make a framework out of it, because it can bundle all kinds of entry point logic. What you’re exporting back is essentially a promise that resolves to a federated remote.
+If I want to use elastic file system (EFS) to get the remote entry on the server I can’t easily because by default the plugin only uses HTTP. While this is the easiest way to get a federated remote in the future I plan to add other bindings to read from the file system directly. The hope is to get this into version 7, but it will probably be like 7.1.
+Reading from the file, can you dive into a little bit on the use case of that? I believe you mentioned before that it’s for fallbacks but correct me if I’m wrong.
+One of the use cases is for fallbacks. Scenarios where something isn’t there when we expect it to be. There’s a couple of ways to handle it. If it’s a React component, you could do an error boundary or dynamic import, then follow that with a catch. In that case, if it’s offline, the application will throw an error and you would have to catch the error and handle it on an implementation by implementation basis to recover the federated remote.
+With the delegate modules, you can shim the module federation interface itself. What webpack gets back is a container, but the container’s functions are your own logic. So you can initialize it, however, you would normally initialize it.
+Then when you’re calling the get property on it, you could say if the get fails look at what webpack is currently asking for. If for example it’s looking for the navigation remote, and navigation tried to get the mega nav and that failed. The catch could just be dynamic “import from node_modules/”, you know, company name slash, whatever the original request was, meganav and to webpack, it’ll still think that it’s retrieving a federated chunk.
+By doing this you’ve actually just redirected webpack to say, well, that didn’t work now go and get this other piece of code and just return it in a federation like way. Then webpack doesn’t know if federation fails or not, you still just use that one import interface. With these changes you have a really robust set of middleware between the connection points between the webpack graphs, you have a lot of control over graphs and what happens. Fallbacks, yes that is one very useful scenario.
+The other big use case is on the server side. I might want to use HTTP to go and get a string off the VPC, evaluate that string inside of the VM, and then return it. However, that comes with some potential security issues.
+So in that scenario you could be using the AWS SDK or even pulling that string from a database value, right?
+This is the beauty of it. A database is one of the potential options that I’ve spoken about a couple times with bigger organizations like BitDev. If we put entries in a database it would be super fast to query where the remote is. The entries themselves wouldn’t be really that large.
+I think another really interesting aspect of using a database is from the security perspective. If you did use a database, you could have really strong user based access controls. If a host is not allowed to query a database or they don’t have the roles and permissions needed they can’t query this federated remote container back out. You could return a container that’s allowed that has a similar interface, but it’s not the admin one. The unauthenticated reply might still return a page, but it’s a page that says you need to log in.
+Does that also help with Edge Side Includes (ESI) and Key Value (KV) stuff on the edge that you talked about before?
+It can, because in places where delegates don’t work natively, like CDN’s that are not Netlify, what you can do is you could say, well, here’s a delegate module. When webpack requests a chunk, what you could do is return your own remote entry, where all doing is it’s fetching HTML, and it’s returning it as React components.
+On the edge network, you technically would have that ESI stitching layer, but it’s the webpack runtime. It would depend on when you render it, you have to be able to hit something that will render. It’s not super automatic, but it’s a whole lot less work when it comes to the implementation, because now you need a little infrastructure to do something with that.
+You wouldn’t have to build your application to run on the edge, which usually requires a very different kind of development look and feel when compared to a normal monolithic app. If you wanted to have some app run partially on node, and have part of it run on the edge, this would offer a more agnostic way for distributed systems to still work without you having to build your implementation top to bottom to be deployed to an edge worker. You could just say, I’m going to import this and this thing is going to live on an edge worker.
+This import then does the equivalent of markup stitching. Fetch the HTML, convert it into a little lightweight React component on the response and render that as if it was a React component.
+Another big concern has always been the security around fetch. What if you want to use your own fetch client, or you want to have cookies or bearer tokens or headers attached to the fetch request. It’s currently very hard to offer that to the end user with the current module federation interface. With a delegate modules how the code gets to webpack is up to you, the only thing that the delegate wants is to resolve a remote entry container.
+In the browser, it’s “window.remote”. On the server, it can be however you want to acquire that remote entry code. As long as you resolve back an executed remote, everything else is in your control.
+Another big use case I see it for is scenarios like how I don’t currently support file system bindings in the plugin. With delegate modules nobody has to wait for me or the team to build out the support. They don’t have to think of how to differentiate between when to use HTTP and when to use something like elastic file system.
+All they would need to do is in the delegate module do something like fs.readFile and point to the remote they’re asking for. Typically this is something like a mounted store slash this team name slash whatever version that I’m after. From there I can just use vanilla require to get that. Another option is to use the util, which would be based on the same one that webpack uses for its async load target. This would be similar to fs.readFile, and then vm.runInThisContext. That way, we could refresh the container whenever we want to, because there’s no require cache that the container itself is getting stuck in. It’s reading a file and then passing it to a JavaScript VM. This is how webpack’s async node target works today. Which is also not really any different from how a standard webpack build works when you put it in async mode.
+Can you talk a little bit about how using delegate modules increases the reliability of the code versus the “promise new promise” syntax?
+Reliability is a great topic to mention. Promise new promise is technically a sound option, if you’re only doing something simple. The problem with it though is when you’re sticking a bunch of code in a template string. There’s no syntax highlighting, you can’t use require, and you can’t use anything that is not already like in a transpiled form. The template string is not going through Babel or anything else. That also means I can’t use es6 in there or optional chaining, which would be really helpful, or even async await. It’s also just brittle. Unless you make sure that you’re just putting simple ES5 in there, it gets a little tricky to try and manage it.
+The bigger problem with using a promise new promise template string is that you can’t really test it because it’s just a promise like it’s very hard to mock “well what is that going to do?”. When it’s a file loaded with delegate modules though you can put a unit test on it you could mock some environment for it to reach out to. You can confirm, hey, this thing resolves this mocked object that says get whatever they requested for you or it just returns a string saying I’m the fallback. Then you could know, cool when I do this import and it fails, the delegate module failure mode is doing what we want it to do.
+The bottom line is it’s testable, it has syntax highlighting, and it can be written in TypeScript, not just a string. At Lululemon our promise new promise is over 200 lines of code. At that scale is where the problems start to come in. A lot of logic starts going in here, because you can kind of build a framework out of module federation now that you control the glue code.
+Webpack is your router, and how webpack gets to these chunks is basically up to you. So you can do a lot with delegate modules. From decisioning to permission based access, fail overs. Anything that you would really want to do, you could, you could do it without your developers having to learn another framework. They don’t have to know how to inject the script and do all of that. It would be one file that one team owns.
+The idea is to try and extract this out to a more reliable location. Developers don’t really need to know about it. It’s more like a platform team thing. It offers the entire team as much control as possible to do what they want in regards to what is being fed to webpack and how it’s going to work. The rest of the development team still just uses require or import from; their implementation doesn’t change. Yet, they now have one of the core concepts of dynamic remotes, which is, I know what I’m importing.
+It’s not completely dynamic, where I don’t even know that I want something like checkout, or what I want from checkout. This is in that case, where you know, this is going to be a checkout page. I want to import checkout/my-bag. That’s a very common use case, we know the string of a thing that we want to import, we know what the intent is at a certain location. Often though, they can’t control what remote gets loaded in there, because it’s usually hard coded. This is a very nice mix of static and dynamic: you still get to use import or require, but you also get to write the connection code between webpack’s host, the incoming remote, and how that’s all going to look.
+You touched on something really interesting just now, you said dynamic but what you described requires having an understanding of what you’re importing. What about the folks that want to have it the 100% dynamic, where you don’t even know what you’re importing, you just get a JSON from somewhere that gives you the remotes.
+We still haven’t tested that fully in the server side environments, because I haven’t had a good use for it. My general recommendation has been to try not to lean on the low level API. Just because there’s some quirks to it. One of the issues we’ve had with Next.js in the past was, you’d always get this error that’s like, “can’t initialize this external” and it would throw a warning in the browser, we would still make it initialize, but webpack wasn’t able to start your remotes for you. So we had to put a proxy on top of the object so that when you try to access it, we could initialize it at that point in time.
What webpack wants is for all the remotes to get initialized up front. When you’re doing the super-dynamic remotes thing, webpack has no idea there’s a federated module on the way, so it can’t prepare anything ahead of time. The problem with that is, if you do a lot of daisy-chaining of these super-dynamic remotes, the first remote you initialized has less share scope than the last one initialized. This is because once you call init, webpack makes a copy of the share-scope object and seals it. If you then add more keys and more shared packages from other remotes, webpack can’t do that whole negotiation where it checks what the other remotes offer — "share all the packages, and everybody pick what we’re going to use." You lose that, because it doesn’t have the circular option to go around and check what everybody’s got. It’s going to initialize what it’s got and share that. Then every time you tack something on, it’s going to initialize and seal it in that same way. That’s the one reason I try to avoid the fully dynamic option.
+We have other little low level functions in there that you can do it and developers and companies have used this in the past with minimal issues. Next.js used to work like this. You know it’s a viable option. It’s just one I prefer to say, if you can at least know what the import is. rather do that but you can also hook into and we might need to adjust the tool slightly, but we have all the low level bits and pieces for you to be able to access is similar to saying window dot remote name.init, window.remoteName, get and manually call things out of the out of the interface yourself like we could do that server or client side.
+If you are doing SSR, the moment of the page request, you know all of the federated remotes for that user for that session. Does that resolve part of the issue?
+If you had a map of them, and you said, okay, as this company, you query, and the query of the whole company, what are all the remotes that shell so and so could use? All right, there’s 25 teams that work under this shell, we don’t know when or where they’re gonna come from. But we know there’s 25 teams, okay, awesome. When the app starts, you could say go loop over all the remotes and call initialize on them, and just start initializing everybody, then, initialization is almost separate from getting.
+So once initialization happens, then you can dynamically flip between whatever you want programmatically, or flip between two different remotes on the fly, just saying, you know, like, I’m gonna get for ease of use, let’s say we had a utility called get remote, and you give it a name, and it pulls the remote off the scope, whether it’s window, or it’s my global scoping that I have a node, get remote name, cool, here’s the container. Then you can initialize or call a getter or from that, however you want to, and that would offer you full programmatic control, while still ensuring that hey, we’ve initialized all potential things before we started trying to pull stuff out of it. So we’re not fragmenting when new webpack runtimes are attached to the host.
+The other option that I really do like is this concept of building out slots. Since you can have a delegate module in there, what you’re importing doesn’t actually have to mean anything anymore. Imagine if we just had a list of remotes and slots for remotes inside of zone one, through zone 50. There’s just slots for remotes and now you could say, Okay, this part of the header, I’m gonna call that zone one slot one. So now when I import zone, one slot one, kind of like, you know, a template or something that you’d have in a CMS, you could tell webpack, zone one slot one is assigned to the header team, and it’s the mega nav.
+Now webpack is still using import from and you have other static imports, but there’s slots and rather, you’re using this delegate module to assign meaning to those slots. Now that it’s aliased internally webpack knows I’m gonna need this right away, because it’s import from not a lazy import, so it can set up whatever it needs to. You can translate zone one goes to slot one. If we augment the little object that we’re sending back there, you can intercept it and go, okay, they just called the get method for slot one.
+That means and I know that the current remote is zone one. So then you can know, okay, if I’m in Zone One, I’m the header. So slot one is going to be mega nav so you can end up calling slot one, then get dot slash mega nav and resolve and return that container. It turns your site to just a bunch of slots and zones with nothing assigned to them. Then through a CMS or some kind of back end you could assign meaning to every slot on the page.
+Imagine if you were doing a/b testing, you have to create a zone where the test gets injected into. If you don’t know what’s happening, you could just import a whole bunch of import zone statements. If some of them don’t exist, then you just resolve them to nothing, but you could build out something like that where you could just say, there’s five possible things that could be here so import, zone 123 or zone 12345, slot 12345.
+If there’s three on the page, these are the three teams that we want them to be in the first, second and third slot. And the zone represents just a unique alias so that you can tell the bundler I want to remote that’s not a different remote. So there’s, you know, remote one remote to remote three. Okay, which remote do you want, remote one could be window mega nav. But webpack doesn’t care about its outer name, it only cares about the inner name that we bound it to. That inner name is all determined by creating a delegate module, that it’s completely detached from what you’re calling it internally.
+And all of this is going to become significantly easier with 7.
+Yeah, with 7, this is, I think, the only thing that’s really missing because it sounds it’s, these are more advanced concepts. But usually, when you do need something like this, you are approaching the upper bounds of the of the standard API, and you’re looking for a bit more power. I think what will really help here is to demonstrate some of the concepts because that’s where it’s probably going it’s going to be harder to adapt into is, well, what can we do? It looks cool, and it’s really interesting. But making sure the community fully realizes the scope of how you can do stuff like creating that zone slotted example, would be a really powerful one to say, hey, here’s a map, it’s a JSON file that we just get off the network. And all I have is a bunch of, you know, very, un specific imports throughout this application. And Webpack reads the JSON file to translate those nonspecific names into what actually should go there, like a schema. So you can basically just say, here’s a schema, I have a template that imports various things. Here’s the schema that’s now going to define what it is. kind of like you would do in Contentful, you create the schema, and then it sends it down, you have a loop that loops over kind of renders out the components according to whatever like, the Contentful schema is. But imagine having an import schema where your whole site is just zones of customization.
+I think one or two like examples of using delegates and various ways will help a lot, just to understand, Okay, well, these are, this is a different way of using it, that’s not immediately obvious. If you see two or three, like wildly different scenarios, it would probably be enough to spark okay, I get what I can do, I get the extent as to which I can change how I think about developing a system be dynamic and respond to things.
+Okay, and then when do you think 7 will be ready to go live.
+So right now, I’m busy working on the Medusa integration with version six. So 7, I’m leaving it in beta. Right now, if you want to use 7, there’s a section on delegate modules, you would just have to expand in the readme, and you can see how to do it.
The code that’s in there is essentially going to become what is in the plugin. I’m going to just call create delegate module. From there, if you use delegate module syntax — or whatever syntax you’re passing to the federation plugin — it’s going to be reinterpreted into a delegate module. It’s very similar to how we did it before, where your @ syntax was converted into `promise new Promise`.
+Instead of converting to promise new promise we’re going to convert it into something that’s more robust. If you use the little delegate module creation function, my internal one won’t get applied. Pretty much it’ll either be that federation resolves what you pass to one that’s generic instead of the webpack plugin, or it’ll be one that you point to, and it’s yours. If it’s yours, it’ll still provide the underlying utility, which is important delegated modules, that allows you to just pass it a global and URL and it will return a container.
+You can resolve that back to webpack, you don’t necessarily have to think too much about what’s going on. All the pieces are there for you. Timeline wise, the beta is pretty much there, I don’t think it’s going to change in its shape. I’m planning to try and roll that out maybe sometime this month, if possible. Before that, I have turned some focus to Medusa and seeing if Medusa is working with Next.js and updating the Medusa plugin. I’m working on verifying some wiring there. Medusa would use delegate modules anyway. It’s kind of built on the foundations of what’s already there.
That’s my prioritization roadmap right now. I want to ensure that the Medusa support is there, since we’re in this, and we’re putting all this together to actually work with it at Lululemon. Then if everything is happy, and it’s all good, I’ll probably make a few other slight adjustments to some of the default options. I think one that might be good to turn on is the automatic async boundaries — the pages dynamically import themselves and re-export themselves all the time, so you won’t ever see an eager error.
+That would solve a lot of issues that get reported.
+Yeah, async boundaries that it’s currently a flag if you just flip it to true, it works. I need to do a little bit more work around the static analysis, because I have to understand what you’re exporting. When it’s evaluated? Does it have the getInitialProps, or getServerProps export? Is it a barrel export from somewhere else in a monorepo? Right now how my loader works is just looks at the current page, and it checks for a string called getServerProps, getStaticProps or getInitialProps, if it sees that string in your file, it will manufacture the Data Loader, along with the dynamic import boundary that it wraps around it.
+It’s quite important that if you’re using it, you still have to have that word somewhere in there, so that I can pick it up and kind of stamp out another equivalent for it. That’s the piece that still needs a bit of work. I would love to see it turned on by default, because that is the kind of prime way that you would want to utilize it, it would follow the same rules we do everywhere else in webpack, where you start with an import Bootstrap, and then everything else happens from there.
+Since the entry points and next are their pages, this is kind of what you would want each page to be a dynamic import to the actual page thing that you want it to do. Now everything that you share is protected behind the fact that it’s a dynamic import.
+I think a lot of people could learn from this.
+It’d be great to get this kind of stuff documented down. I still haven’t written anything about delegate modules, why they’re cool, and what you can do with them. It would be nice to have something that just goes into a bit more depth on it. This interview helped get a lot of the information out.
+What’s next? Planning to work on Rspack support for Module Federation?
With Rspack coming out as well — I mean, we’ve already used webpack as our bundler tool for NPM packages. I’ve been looking at it, and I’m heavily considering using Rspack for everything that’s not Next.js at Lululemon. For all of our NPM package builds, we can just use Rspack because it’s going to be super fast, while we still have all the flexibility of webpack itself to build out these packages. There are a few bugs in webpack’s ESM implementation that I have opened PRs for, but they have never been merged.
+If those bugs were able to be fixed and Rspack that makes for an even stronger case for using it. Once federation lands in Rspack, it’s like, hey, it’s way faster and it has federation. It’s gonna be like the ES build of the webpack era stuff, it’s gonna be the super quick thing that you default to. It’s got some of the killer features that really offer it scalability. And I could see federation support also being one of the key things that help it stand out from Turbopack because we don’t know if Turbopack is going to implement federation or not. If somebody wanted a module federation friendly Turbopack Rspack is where you would go.
+It was really very interesting. Thank you!
+It was great to chat about this. I’m super excited to see what Delegate Modules and Rspack ends up unfolding for us. It’s gonna be great. Have a great day, cheers!
+Web Development for medium to large-size projects is a tortuous road that often requires complex dependencies and components management. Module Federation is a technology solution that solves the challenges that come with large scale and creates a streamlined development process.
+We’re going to explore the combination of Server-Side-Rendering and Module Federation with React 18 and Webpack. The solutions described here are not limited to the listed technologies and can be reused in various combinations with other tools.
+Multiple independent projects can work together as a single application using module federation. This makes it possible to design web applications more modularly, allowing several teams to work on various components of the program without having to be closely coupled. In the Module Federation, every project is a unique module that can consume and expose other modules.
+By rendering a JavaScript application on the server, sending the finished HTML to the browser, and then adding JavaScript to the HTML to make the program interactive, SSR creates a web page. This technique typically improves JavaScript application performance, which is especially helpful for people with sluggish internet connections or outdated hardware.
+We may use the same components and modules across various portions of the application and still keep the performance advantages of SSR when we combine Module Federation and SSR with React. This makes the development process more effective and enhances the user experience.
+There are several potential difficulties to take into account while utilizing Module Federation with SSR and React. Making sure the modules are correctly exposed and consumed by the host and remote apps is one of the problems. Additionally, managing the application’s state could be difficult, particularly if it needs to be displayed on the server before being hydrated on the client.
+Using libraries like @module-federation/nextjs-mf, which offers a set of utilities to handle the configuration and setup of the host and remote applications, developers can get around these difficulties. The state of the application should be handled correctly on the server and client, and developers should make sure that the modules are exposed and consumed effectively.
+Utilizing Module Federation with SSR and React can enhance the creation of online applications and the user experience, but it’s vital to take into account the difficulties and use the proper tools and libraries to handle the setup and configuration of the host and remote applications.
+mkdir my-app
+cd ./my-app
+npx create-next-app host
+cd host
+npm install --save @module-federation/nextjs-mf
+cd ../
+npx create-react-app remote
+cd remote
+npm install --save-dev webpack webpack-cli html-webpack-plugin webpack-dev-server babel-loader
+const HtmlWebpackPlugin = require('html-webpack-plugin');
+const { ModuleFederationPlugin } = require('webpack').container;
+const path = require('path');
+module.exports = {
+ entry: './src/index',
+ mode: 'development',
+ devServer: {
+ static: {
+ directory: path.join(__dirname, 'dist'),
+ },
+ port: 3001,
+ },
+ output: {
+ publicPath: 'http://localhost:3001/',
+ },
+ module: {
+ rules: [
+ {
+ test: /\.jsx?$/,
+ loader: 'babel-loader',
+ exclude: /node_modules/,
+ options: {
+ presets: ['@babel/preset-react'],
+ },
+ },
+ ],
+ },
+ plugins: [
+ new ModuleFederationPlugin({
+ name: 'remote',
+ library: { type: 'var', name: 'remote' },
+ filename: 'remote.js',
+ exposes: {
+ './Nav': './src/Nav',
+ },
+ shared: {
+ react: {
+ singleton: true,
+ version: '0',
+ requiredVersion: false,
+ },
+ 'react-dom': {
+ requiredVersion: false,
+ singleton: true,
+ version: '0',
+ },
+ },
+ }),
+ new HtmlWebpackPlugin({
+ template: './public/index.html',
+ }),
+ ],
+};
+// host/next.config.js
+const { NextFederationPlugin } = require('@module-federation/nextjs-mf');
+module.exports = {
+ webpack(config, options) {
+ if (!options.isServer) {
+ config.plugins.push(
+ new NextFederationPlugin({
+ name: 'host',
+ remotes: {
+ remote: 'remote@http://localhost:3001/remote.js',
+ },
+ filename: 'static/chunks/remoteEntry.js',
+ }),
+ );
+ }
+ return config;
+ },
+};
+//remote/src/Nav.js
+import React from 'react';
+const Nav = () => {
+ return (
+ <div>
+ This is my remote nav
+ <nav>
+ <a href="#">Home</a>
+ <a href="#">About</a>
+ <a href="#">Contact</a>
+ </nav>
+ </div>
+ )
+}
+export default Nav;
+//remote/index.js
+import('./bootstrap');
+//remote/bootstrap.js
+import React from 'react';
+import App from './App';
+import { createRoot } from 'react-dom/client';
+const container = document.getElementById('root');
+const root = createRoot(container);
+root.render(<App />);
+//remote/App.js
+import React from 'react';
+import Nav from './Nav';
+function App() {
+ return (
+ <div className="App">
+ <header className="App-header">
+ <Nav />
+ </header>
+ </div>
+ );
+}
+export default App;
+npm run build
+npm run start
+. . .
+"scripts": {
+ "start": "webpack-dev-server --config webpack.config.js",
+ "build": "webpack --mode production",
+ "clean": "rm -rf dist"
+},
+. . .
+npm run start
+// host/pages/index.js
+import dynamic from 'next/dynamic';
+const Nav = dynamic(() => import('remote/Nav'), { ssr: false });
+export default function HomePage() {
+ return (
+ <div>
+ This is my ssr host
+ <Nav />
+ </div>
+ );
+}
+With these steps, we have set up a basic application that uses Module Federation with SSR and React 18. The above code samples can be modified to match your specific use case. With Module Federation, it’s easy to share and reuse components across different parts of the application, making the development process more efficient.
+This project is available here.
+Official Module Federation(MF) partner, Valor is actively contributing to the MF ecosystem and unlocking new possibilities. + +Valor is also providing enterprise support, consulting, and team augmentation email us at sales@valor-software.com to learn how we can help.
+A couple of days ago, I faced an issue with Monitoring the Progress of an HTTP Request. According to old developers' tradition, firstly, I asked Google for it. I anticipated getting a bunch of different answers and choosing an appropriate one. This time my instincts failed me. Even though I got a bunch of similar solutions, I didn’t find the appropriate example. It is worth clarifying that I’m working on a NestJS-based project. Let me explain why I decided to create my solution from scratch and why most of the existing solutions on the topic need to be revised in my case.
+First, I want to share the article that describes a bunch of the results above as well as possible. Let me provide essential thoughts on the article.
+The article describes the request that provides content downloading. In this case, we are talking about the precious content size.
The Content-Length HTTP header is essential to the correct HTTP response.
After the server application sets Content-Length, chunked data writing process should be run.
First, the client application gets Content-Length.
After that, it gets every data chunk and calculates the progress as the following.
+progress = 100 * (chunkSize / contentLength)
The approach above is beneficial if we are talking about content downloading. However, it doesn’t work in my case for the following reasons.
+My task is about something other than content downloading. Moreover, we need to have a functionality that allows us to calculate the progress according to calculations, not only according to data transfer.
Despite the application not knowing the content size, it has a total number of iterations.
Chunk-based approach doesn’t work in this case. The final result preparation will take a long time, and the output data should be written to the response simultaneously. That’s why we need to inform the client before sending a response.
+In other words, the requirements for the new approach are the following.
+The response writing goes simultaneously without any data chunking.
The progress should be provided before that.
I don’t want to waste your time and give a couple of conceptual points of my approach regarding accounting the requirements above.
+Provide the progress via WebSockets because of persistent connection and high performance.
+Connect WebSockets with a current session to pass all needed data from the HTTP request processing process.
+All thoughts and code below will be strongly connected to these points. But before, let me share the final solution.
+I provided a simplified version of the data processing because I want to focus on this task. We have 150 iterations in the example below. The result is an array of 150 random numbers, each calculates in 100 - 1000 milliseconds. I found this example as a minimally viable model of the objective process.
+import { Injectable } from '@nestjs/common';
+
+const getRandomArbitrary = (min: number, max: number): number =>
+ Math.random() * (max - min) + min;
+const delay = (time: number) =>
+ new Promise((resolve) => setTimeout(resolve, time));
+
+@Injectable()
+export class AppService {
+
+ getIterationCount(): number {
+ return 150;
+ }
+
+ async getData(token: string): Promise<string[]> {
+ return new Promise(async (resolve, reject) => {
+ try {
+ const result = [];
+
+ for (let i = 0; i < this.getIterationCount(); i++) {
+ result.push(getRandomArbitrary(1, 9999));
+ await delay(getRandomArbitrary(100, 1000));
+ }
+
+ resolve(result);
+ } catch (e) {
+ reject(e);
+ }
+ });
+ }
+}
+The future steps are regarding the ProgressManager implementation.
+The ProgressManager should be a separate NestJS service able to do the following.
+Start the "Progress" session (Not the HTTP session) with the unique token taken from the client application.
Stop the "Progress" session
Increase the value of the progress.
Please look at the following commented code.
+import { Injectable } from '@nestjs/common';
+import { Server } from 'socket.io';
+
+export interface ProgressSession {
+ token: string;
+ total: number;
+ counter: number;
+ timerId: any;
+}
+
+@Injectable()
+export class ProgressManager {
+ // The Socket Server injection will be described later
+ public server: Server;
+ // This map contains all Progress session data
+ private storage: Map<string, ProgressSession> = new Map();
+
+ // Start the session with the token and the total number of iterations
+ startSession(token: string, total: number, delay = 2000) {
+ // Get current session from the storage
+ const currentSession = this.storage.get(token);
+ // Do nothing if it's already exist
+ if (currentSession) {
+ return;
+ }
+ // Send the progress every "delay" milliseconds
+ const timerId = setInterval(async () => {
+ const currentSession: ProgressSession = this.storage.get(token);
+ // Protect the functionality: if the current session is missing then do nothing
+ if (!currentSession) {
+ return;
+ }
+ // Calculate the progress
+ let progress = Math.ceil(
+ (currentSession.counter / currentSession.total) * 100
+ );
+ // Protect the progress value, it should be less or equal 100
+ if (progress > 100) {
+ progress = 100;
+ }
+ // Send the progress. Pay attention that the event name should contain the "token"
+ // Client will use this token also
+ this.server.emit(`progress-${token}`, progress);
+ }, delay);
+ // Initial Progress Session settings. Token is a key.
+ this.storage.set(token, {
+ token,
+ total,
+ counter: 0,
+ timerId,
+ });
+ }
+
+ // This method increases the progress
+ step(token: string, value = 1) {
+ // Get the current session
+ const currentSession: ProgressSession = this.storage.get(token);
+ // Do nothing if it doesn't exist
+ if (!currentSession) {
+ return;
+ }
+ // Increase the counter
+ const counter = currentSession.counter + value;
+ // Update the storage
+ this.storage.set(token, {
+ ...currentSession,
+ counter,
+ });
+ }
+
+ // Stop the session by the token
+ stopSession(token: string) {
+ // Get the current session
+ const currentSession: ProgressSession = this.storage.get(token);
+ // Do nothing if it doesn't exist
+ if (currentSession) {
+ // Stop the current timer
+ clearInterval(currentSession.timerId);
+ // Remove information regarding the current session from the storage
+ this.storage.delete(token);
+ }
+ }
+}
+You can find the code above here.
Another important point is the integration of NestJS with WebSockets and connecting the Progress Manager with it. The following code is responsible for that.
+import {
+ WebSocketGateway,
+ WebSocketServer,
+ OnGatewayInit,
+} from '@nestjs/websockets';
+import { Server } from 'socket.io';
+import { ProgressManager } from './progress-manager';
+
+@WebSocketGateway({ cors: true })
+export class AppGateway implements OnGatewayInit {
+ constructor(private progressManager: ProgressManager) {}
+
+ @WebSocketServer() server: Server;
+
+ afterInit() {
+ // After the WebSockets Gateway has to init, then pass it to the ProgressManager
+ this.progressManager.server = this.server;
+ }
+}
+The source >>
+And, of course, according to NestJS requirements, we need to tell the related module about that.
import { Module } from '@nestjs/common';
+
+import { AppController } from './app.controller';
+import { AppService } from './app.service';
+import { AppGateway } from './app.gateway';
+import { ProgressManager } from './progress-manager';
+
+@Module({
+ imports: [],
+ controllers: [AppController],
+ providers: [AppService, AppGateway, ProgressManager],
+})
+export class AppModule {}
+It’s time to focus on the endpoint’s controller. It looks pretty simple.
+import { Controller, Get, Query } from '@nestjs/common';
+import { AppService } from './app.service';
+
+@Controller()
+export class AppController {
+ constructor(private readonly appService: AppService) {}
+
+ @Get()
+ getData(@Query() query: { token: string }) {
+ return this.appService.getData(query.token);
+ }
+}
+And the last thing about the server is regarding the Data Providing Example modification. The following example is close to the first example in this article. The main aim is to add "Progress functionality" here. Please, read the comment in the code. It’s important.
+import { Injectable } from '@nestjs/common';
+import { ProgressManager } from './progress-manager';
+
+const getRandomArbitrary = (min: number, max: number): number =>
+ Math.random() * (max - min) + min;
+const delay = (time: number) =>
+ new Promise((resolve) => setTimeout(resolve, time));
+
+@Injectable()
+export class AppService {
+ // Use progressManager
+ constructor(private readonly progressManager: ProgressManager) {}
+
+ // 150 iterations should be processed
+ getIterationCount(): number {
+ return 150;
+ }
+
+ async getData(token: string): Promise<string[]> {
+ return new Promise(async (resolve, reject) => {
+ // We need to start the Progress Session before data preparation
+ this.progressManager.startSession(token, this.getIterationCount());
+ try {
+ // Initialize the array of results
+ const result = [];
+
+ for (let i = 0; i < this.getIterationCount(); i++) {
+ // Calculate the result
+ result.push(getRandomArbitrary(1, 9999));
+ // Increase the Progress counter
+ this.progressManager.step(token);
+ // Random delay
+ await delay(getRandomArbitrary(100, 1000));
+ }
+
+ // Return the result
+ resolve(result);
+ } catch (e) {
+ reject(e);
+ } finally {
+ // We need to stop the ProgressManager in any case.
+ // Otherwise, we have a redundant timeout.
+ this.progressManager.stopSession(token);
+ }
+ });
+ }
+}
+The backend part of my example is ready. You can find the full backend solution here.
The client part of my example is placed here. Both parts are placed in one monorepo — thanks to Nx for that. Let’s look at it. Please read the comments in the code below.
+import * as io from 'socket.io-client';
+import { v4 } from 'uuid';
+import axios from 'axios';
+
+// Generate a unique ID (token)
+const token = v4();
+
+console.info(new Date().toISOString(), `start the request`);
+
+// Call the endpoint described above
+axios
+ .get(`http://localhost:3333/api?token=${token}`)
+ .then((resp) => {
+ // Print the total length of requested data (an array of random numbers)
+ console.info(new Date().toISOString(), `got ${resp.data.length} records`);
+ process.exit(0);
+ })
+ .catch((e) => {
+ console.info(e);
+ process.exit(0);
+ });
+// We need to connect to the related Socket Server
+const ioClient = io.connect('ws://localhost:3333');
+// And wait for `progress-${token}` event
+ioClient.on(`progress-${token}`, (progress) =>
+ console.info(new Date().toISOString(), `processed ${progress}%`)
+);
+It’s time to try the solution.
+git clone git@github.com:buchslava/nest-request-progress.git
+cd nest-request-progress
+npm i
+npx nx run server:serve
+Open another terminal and run:
+npx nx run client:serve
As a full-stack developer, I always try to gain new knowledge. I heard about the Rust programming language some years ago, but I never got around to trying real examples. I spent some time on Rust a couple of days ago, and I want to share some experiences with you.
+First, I want to note that this article is suitable mainly for NodeJS/Javascript developers who didn’t hear about Rust before or for those who are trying the first steps.
+Second, I’ll describe only one issue I tackled, and I don’t aim to provide you with the ultimate set of knowledge. Despite that, I hope my example will engage you regarding learning Rust.
+I suppose, at this point, you ask me a question. As a Web developer, why should I learn Rust? I predicted this question. Honestly, this article is a logical continuation of my previous one.
+It’s time to tell you a couple of words about Rust.
+Wikipedia tells us about Rust the following.
+"Rust is a multi-paradigm, high-level, general-purpose programming language. Rust emphasizes performance, type safety, and concurrency. Rust enforces memory safety—that is, that all references point to valid memory—without requiring the use of a garbage collector or reference counting present in other memory-safe languages. To simultaneously enforce memory safety and prevent concurrent data races, Rust’s "borrow checker" tracks the object lifetime of all references in a program during compilation. Rust is popular for systems programming but also offers high-level features including some functional programming constructs. Software developer Graydon Hoare created Rust as a personal project while working at Mozilla Research in 2006. Mozilla officially sponsored the project in 2009. Since the first stable release in May 2015, Rust has been adopted by companies including Amazon, Discord, Dropbox, Facebook (Meta), Google (Alphabet), and Microsoft."
+Don’t think about Rust as a Javascript-similar one. This language is entirely different. Please, pay attention to the points below.
+Rust is a compilable language.
+Despite its general purpose, it mostly looks like a competitor to C++ (or even C). If you are a Golang-familiar folk, please, don’t compare Rust with Golang! They are different too.
+One of the main features is safe Multi-Threading.
+References and Borrowing topic could be a bit difficult for Javascript folk. Please, focus on it!
+Please, read this resource.
+I want to focus on safe Multi-Threading in this article because understanding this feature is the number one way to understand this beautiful language. I also understand that the safe Multi-Threading topic is one of the trickiest in languages outside Rust. Java, Golang, and C++ are good examples. Rust language has a large community, and there are many resources ( this one, for example) about it. But I was faced with a lack of practical examples and simple explanations. Despite that, I found one classic example that helps you to enter this topic as quickly as possible.
+Dining Philosophers Problem is a classic Multi-Threading task described here.
+"Five silent philosophers sit at a round table with bowls of spaghetti. Forks are placed between each pair of adjacent philosophers. Each philosopher must alternately think and eat. However, a philosopher can only eat spaghetti when they have both left and right forks. Each fork can be held by only one philosopher and so a philosopher can use the fork only if it is not being used by another philosopher. After an individual philosopher finishes eating, they need to put down both forks so that the forks become available to others. A philosopher can take the fork on their right or the one on their left as they become available, but cannot start eating before getting both forks. Eating is not limited by the remaining amounts of spaghetti or stomach space; an infinite supply and an infinite demand are assumed. The problem is how to design a discipline of behavior (a concurrent algorithm) such that no philosopher will starve; i.e., each can forever continue to alternate between eating and thinking, assuming that no philosopher can know when others may want to eat or think."
+Initially, Rust’s solution looked like the one below. You can find it here
+use std::sync::{Arc, Mutex};
+use std::{thread, time};
+
+struct Philosopher {
+ name: String,
+ left: usize,
+ right: usize,
+}
+
+impl Philosopher {
+ fn new(name: &str, left: usize, right: usize) -> Philosopher {
+ Philosopher {
+ name: name.to_string(),
+ left: left,
+ right: right,
+ }
+ }
+
+ fn eat(&self, table: &Table) {
+ let _left = table.forks[self.left].lock().unwrap();
+ let _right = table.forks[self.right].lock().unwrap();
+
+ println!("{} is eating.", self.name);
+
+ let delay = time::Duration::from_millis(1000);
+
+ thread::sleep(delay);
+
+ println!("{} is done eating.", self.name);
+ }
+}
+
+struct Table {
+ forks: Vec<Mutex<()>>,
+}
+
+fn main() {
+ let table = Arc::new(Table {
+ forks: vec![
+ Mutex::new(()),
+ Mutex::new(()),
+ Mutex::new(()),
+ Mutex::new(()),
+ Mutex::new(()),
+ ],
+ });
+
+
+ let philosophers = vec![
+ Philosopher::new("Donald", 0, 1),
+ Philosopher::new("Larry", 1, 2),
+ Philosopher::new("Mark", 2, 3),
+ Philosopher::new("John", 3, 4),
+ Philosopher::new("Bruce", 0, 4),
+ ];
+
+ let handles: Vec<_> = philosophers
+ .into_iter()
+ .map(|p| {
+ let table = table.clone();
+
+ thread::spawn(move || {
+ p.eat(&table);
+ })
+ })
+ .collect();
+
+ for h in handles {
+ h.join().unwrap();
+ }
+}
+I don’t want to dig into the code above a lot, and I guess you will enjoy doing it from your side. But I need to focus on some essential points.
+It’s no secret that the main Multi-Threading task is about preventing data collisions. In our example, collision means neighbor philosophers simultaneously took the same fork because each philosopher has his thread and is eating and thinking simultaneously with others. Mutex tackles it. Mutex means mutual exclusion, "only one at a time." That’s why forks association with the related mutexes is a great idea.
+At this point, I’d like to interrupt my story and tell you one important note. Honestly, this example is more complicated than I’m explaining it. I just aim to excite new Rust folks. Sorry for telling you that, dear experts. Despite Mutex being a golden concurrency standard, it’s not a panacea. Some issues are possible even here. I’ll provide more helpful information about that in the article’s The Roadmap part. Please, read carefully about "Deadlock," "Livelock," and "Starvation."
+The related code is the following.
+ let table = Arc::new(Table {
+ forks: vec![
+ Mutex::new(()),
+ Mutex::new(()),
+ Mutex::new(()),
+ Mutex::new(()),
+ Mutex::new(()),
+ ],
+ });
+and
+ let _left = table.forks[self.left].lock().unwrap();
+ let _right = table.forks[self.right].lock().unwrap();
+A philosopher takes a couple of forks and holds them.
+What happens with his neighbors at this moment? +They want to take a fork. But the fork is already taken.
+What happens in this case with the neighbors? +They (means related threads) are waiting when the first philosopher releases the forks (unlocks his mutexes).
+Why are they waiting? +Because of mutexes!
+Look at the code below.
+ fn eat(&self, table: &Table) {
+ let _left = table.forks[self.left].lock().unwrap();
+ let _right = table.forks[self.right].lock().unwrap();
+
+ println!("{} is eating.", self.name);
+
+ let delay = time::Duration::from_millis(1000);
+
+ thread::sleep(delay);
+
+ println!("{} is done eating.", self.name);
+ }
+When does the philosopher release the forks?
+He grabs the forks and waits for 1 second. The mutexes will be released after the eat function is completed. +Other lucky neighbors (right and left) grab the related forks. Pay attention to neighbors running in separate threads (i.e., simultaneously).
+Also, let’s look at the following code directly related to multi-threading.
+ let handles: Vec<_> = philosophers
+ .into_iter()
+ .map(|p| {
+ let table = table.clone();
+
+ thread::spawn(move || {
+ p.eat(&table);
+ })
+ })
+ .collect();
+
+ for h in handles {
+ h.join().unwrap();
+ }
+It’s time to run the original example.
+git clone git@github.com:buchslava/dining-philosophers-problem.git
+cd dining-philosophers-problem
+git checkout original-version
+cargo build
+./target/debug/dining-philosophers
+I asked myself. +Is it possible to collect all results during the program execution (I mean print messages) and provide them immediately after the whole logic has been completed?
+I found this task challenging because I know from other technologies that cross-thread interaction is always painful. As a Javascript folk, I first thought about something like Promise.all technique.
+Surprisingly, I found a similar technique in YOSHUA WUYTS blog. I strongly recommend reading this resource.
+Please, look at the following article.
+Moreover, I found the following table pleasant for Javascript folk.
+JavaScript |
+Rust |
+description |
+
Promise.allSettled |
+future::join |
+does not short-circuit |
+
Promise.all |
+future::try_join |
+short-circuits when an input value is rejected |
+
Promise.race |
+future::select |
+short-circuits when an input value is settled |
+
Promise.any |
+future::try_select |
+short-circuits when an input value is fulfilled |
+
According to the information above, the new solution should look like the one below.
+use async_std::future;
+
+let a = future::ready(Ok(1));
+let b = future::ready(Ok(2));
+
+let c = future::try_join(a, b);
+assert_eq!(c.await?, (1, 2));
+As an impatient learner, I looked at the futures approach and the origin example at the same time. I was slightly disappointed because I couldn’t reimplement the original version in a future-based way. Honestly, I don’t have enough time for that. In addition, according to digging results, the future’s concept is drastically different from threads. That’s why I’m not sure this idea is good. I postponed this activity.
+I decided to choose another way. I researched more and found Using Message Passing to Transfer Data Between Threads.
+I think you would agree the way below is most friendly to the original Dining Philosophers implementation.
+use std::sync::mpsc;
+use std::thread;
+
+fn main() {
+ let (tx, rx) = mpsc::channel();
+
+ thread::spawn(move || {
+ let val = String::from("hi");
+ tx.send(val).unwrap();
+ });
+
+ let received = rx.recv().unwrap();
+ println!("Got: {}", received);
+}
+It’s time to move forward and modify our solution. I want to provide the complete source code and explain it step by step.
+use std::sync::{Arc, Mutex};
+use std::{thread, time};
+use std::sync::mpsc::{Sender};
+use std::sync::mpsc;
+
+struct Philosopher {
+ name: String,
+ left: usize,
+ right: usize,
+}
+
+impl Philosopher {
+ fn new(name: &str, left: usize, right: usize) -> Philosopher {
+ Philosopher {
+ name: name.to_string(),
+ left: left,
+ right: right,
+ }
+ }
+
+ fn eat(&self, table: &Table, sender: &Sender<String>) {
+ let _left = table.forks[self.left].lock().unwrap();
+ let _right = table.forks[self.right].lock().unwrap();
+
+ // println!("{} is eating.", self.name);
+ sender.send(format!("{} is eating.", self.name).to_string()).unwrap();
+
+ let delay = time::Duration::from_millis(1000);
+
+ thread::sleep(delay);
+
+ // println!("{} is done eating.", self.name);
+ sender.send(format!("{} is done eating.", self.name).to_string()).unwrap();
+ }
+}
+
+struct Table {
+ forks: Vec<Mutex<()>>,
+}
+
+fn main() {
+ let (tx, rx) = mpsc::channel();
+ let table = Arc::new(Table {
+ forks: vec![
+ Mutex::new(()),
+ Mutex::new(()),
+ Mutex::new(()),
+ Mutex::new(()),
+ Mutex::new(()),
+ ],
+ });
+
+
+ let philosophers = vec![
+ Philosopher::new("Donald", 0, 1),
+ Philosopher::new("Larry", 1, 2),
+ Philosopher::new("Mark", 2, 3),
+ Philosopher::new("John", 3, 4),
+ Philosopher::new("Bruce", 0, 4),
+ ];
+
+ let handles: Vec<_> = philosophers
+ .into_iter()
+ .map(|p| {
+ let table = table.clone();
+ let sender = tx.clone();
+
+ thread::spawn(move || {
+ p.eat(&table, &sender);
+ })
+ })
+ .collect();
+
+ for h in handles {
+ h.join().unwrap();
+ }
+
+ tx.send("Done".to_string()).unwrap();
+
+ let mut result: String = String::from("");
+
+ for received in rx {
+ if received == "Done" {
+ break;
+ }
+ result.push_str(&received);
+ result.push_str("\n");
+ }
+ println!("{}", result);
+}
+use std::sync::mpsc::{Sender};
+use std::sync::mpsc;
+fn main() {
+ let (tx, rx) = mpsc::channel();
+ // ...
+}
+ thread::spawn(move || {
+ p.eat(&table, &sender);
+ })
+fn eat(&self, table: &Table, sender: &Sender<String>) {
+ let _left = table.forks[self.left].lock().unwrap();
+ let _right = table.forks[self.right].lock().unwrap();
+
+ // println!("{} is eating.", self.name);
+ sender.send(format!("{} is eating.", self.name).to_string()).unwrap();
+
+ let delay = time::Duration::from_millis(1000);
+
+ thread::sleep(delay);
+
+ // println!("{} is done eating.", self.name);
+ sender.send(format!("{} is done eating.", self.name).to_string()).unwrap();
+ }
+ for h in handles {
+ h.join().unwrap();
+ }
+
+ tx.send("Done".to_string()).unwrap();
+
+ let mut result: String = String::from("");
+
+ for received in rx {
+ if received == "Done" {
+ break;
+ }
+ result.push_str(&received);
+ result.push_str("\n");
+ }
+ println!("{}", result);
+Pay attention to the "Done" message. This is a criterion of the end of the process.
+It’s time to run the final solution.
+git checkout main
+cargo build
+./target/debug/dining-philosophers
+Looks good!
+As promised, I will provide some crucial additions to this task.
+A deadlock can occur in this task when all philosophers hold exactly one fork each. This is currently being worked around in the article by ignoring the fact that the philosophers sit at a round table: since Donald and Bruce share the same left fork (0), one of them will never be able to hold one fork if the other holds any number of forks (due to constantly trying left first). +If we consider the round-table part, Bruce’s left fork should be 4, and his right fork should be 0:
+Philosopher::new("Bruce", 4, 0),
+Now, this introduces a possible deadlock. It’s tough to replicate normally, but it becomes easy once we implement the "an infinite supply and an infinite demand are assumed" part. Given infinite time, we will eventually hit a deadlock where all philosophers will be stuck waiting forever.
+An easy way to reproduce it is to add a 5ms wait on every philosopher’s left fork pickup; it will deadlock.
+use std::sync::{Arc, Mutex};
+use std::{thread, time};
+
+struct Philosopher {
+ name: String,
+ left: usize,
+ right: usize,
+}
+
+impl Philosopher {
+ fn new(name: &str, left: usize, right: usize) -> Philosopher {
+ Philosopher {
+ name: name.to_string(),
+ left: left,
+ right: right,
+ }
+ }
+
+ fn eat(&self, table: &Table) {
+ println!("{} is picking up the left fork.", self.name);
+ let _left = table.forks[self.left].lock().unwrap();
+
+ // added 5ms duration
+ thread::sleep(time::Duration::from_millis(5));
+
+ println!("{} is picking up the right fork.", self.name);
+ let _right = table.forks[self.right].lock().unwrap();
+
+ println!("{} is eating.", self.name);
+
+ let delay = time::Duration::from_millis(1000);
+
+ thread::sleep(delay);
+
+ println!("{} is done eating.", self.name);
+ }
+}
+
+struct Table {
+ forks: Vec<Mutex<()>>,
+}
+
+fn main() {
+ let table = Arc::new(Table {
+ forks: vec![
+ Mutex::new(()),
+ Mutex::new(()),
+ Mutex::new(()),
+ Mutex::new(()),
+ Mutex::new(()),
+ ],
+ });
+
+
+ let philosophers = vec![
+ Philosopher::new("Donald", 0, 1),
+ Philosopher::new("Larry", 1, 2),
+ Philosopher::new("Mark", 2, 3),
+ Philosopher::new("John", 3, 4),
+ // changed from Philosopher::new("Bruce", 0, 4),
+ Philosopher::new("Bruce", 4, 0),
+ ];
+
+ let handles: Vec<_> = philosophers
+ .into_iter()
+ .map(|p| {
+ let table = table.clone();
+
+ thread::spawn(move || {
+ p.eat(&table);
+ })
+ })
+ .collect();
+
+ for h in handles {
+ h.join().unwrap();
+ }
+}
+You can find a working (but really not working…) example here.
+As a concurrency folk, you should start thinking about Deadlock, Livelock, and Starvation. Please, read about it here.
+Tackling Deadlock, Livelock, and Starvation is not easy, and there is no silver bullet here. Despite that, you can dig into different existing solutions on the topic. This one, for example.
+Also, it would be best if you learned Atomic Calculations. To become the Ninja of Concurrency, start digging from Rust Atomics and Locks Low-Level Concurrency in Practice.
+Finally, I will satisfy your curiosity and give you a NodeJS version of the solution. It was taken from here and corrected a bit…
+You could read and run it.
+cd nodejs
+node index
+I hope it will also widen your NodeJS horizons.
+PS: Thanks to Eduardo Speroni for very useful comments and for helping during my work on the article.
+It’s no secret that NodeJS solutions are not so performant, especially if we consider a solution with a bunch of synchronous operations or vice versa - we work with a tricky multi-thread solution. A good example is image processing or ciphering. Despite some performance issues, NodeJS retains its reputation as a mainstream. Moreover, NodeJS tries to be more flexible. A powerful NodeJS Addons functionality allows developers to write some NodeJS functionalities in C++. Node.js with Rust has become popular lately. I mentioned this technique because I will discuss Rust programming language integration with NodeJS. Why Rust? It’s a good question… I want to provide some essential facts regarding Rust briefly.
+Memory-safe approach preventing memory leaks.
+Type-safe syntax control.
+No "Data race" issue owing to concurrency management.
+Programs are compiled in the "ahead-of-time" manner.
+Utilizes and promotes zero-cost abstractions.
+No resource-consuming "garbage collectors", no JIT compiler, no virtual machine.
+Minimal runtime and memory footprint.
+Very good dependency management tool.
+Helpful compiler errors with clear and doable recommendations.
+Apart from that, Rust is multithread friendly and it has a much simpler syntax compared with C/C++.
+You can find the following resource valuable. This resource will persuade you regarding Rust Performance.
+It’s easy to see that the Rust integration described above is a bit difficult. Fortunately, evolution does not come to a halt. Today I’m glad to introduce a new Animal to our Technological Zoo.
+NAPI-RS is a framework for building pre-compiled Node.js addons in Rust.
+Let’s jump off the bat!
+Of course, the article aims to introduce you napi-rs as the easiest way to integrate NodeJS with Rust. The best way to do it is to provide a more complicated example than a standard one.
+I will provide a NodeJS application that gets a file, uploads it, and transforms it afterward. Let’s say it is reducing the saturation. The image operation above should be provided on the Rust side.
+But before that, let’s try the standard functionality.
+First, you need to install Rust. Cargo builder is included there.
+Second, I recommend creating a new project via the following template. +Third, yarn is recommended here.
+It’s time to cover all essential points.
+yarn install
+yarn build
+Cargo.toml contains all information regarding the Rust package, including dependencies. This file is similar to package.json in NodeJS.
+src/lib.rs
+The file above contains Rust-defined functions for future exporting. In this example, a defined function plus_100 adds 100 to the input parameter.
+#![deny(clippy::all)]
+
+use napi_derive::napi;
+
+#[napi]
+pub fn plus_100(input: u32) -> u32 {
+ input + 100
+}
+It’s obvious to see package.json and other JS stuff here because we are talking about Rust and NodeJS integration. package.json contains required dependencies like @napi-rs/cli that allow you to build the solution. Also, pay attention to the following files.
+./index.js
+This file contains your library binding with its exporting. Please look at the last lines of code.
+const { plus100 } = nativeBinding;
+
+module.exports.plus100 = plus100;
+Do you remember Rust’s plus100 definition above? These lines +precisely represent a bridge between Rust and NodeJS.
+./index.d.ts
+This file contains Typescript definitions (signatures) of your Rust functionality.
+/* tslint:disable */
+/* eslint-disable */
+
+/* auto-generated by NAPI-RS */
+
+export function plus100(input: number): number
+Important note! You shouldn’t edit the files above because they are autogenerated and change on every Rust definition update after you complete the yarn build command.
+./simple-test.js
+The following code illustrates how to run a Rust-defined function. Pay attention to the first line. You should import the function from ./index.js (see above).
+const { plus100 } = require("./index");
+
+console.assert(plus100(0) === 100, "Simple test failed");
+
+console.info("Simple test passed");
+Let’s run it.
+node simple-test
+After we are sure your solution works well, let’s make the solution image-processing friendly. Let’s pass the following steps.
+Change ./Cargo.toml
+[lib]
+crate-type = ["cdylib"]
+path = "lib/lib.rs"
+path = "lib/lib.rs" has been added. Now we use the lib folder instead src for Rust code. src folder could be reserved for future JS/TS code. Let’s remove the src folder for now.
+First, install the expected Rust dependency (image package).
+cargo add image
+Second, create lib/lib.rs
+#![deny(clippy::all)]
+
+use image::{GenericImageView, ImageBuffer, Pixel};
+
+use napi_derive::napi;
+
+#[napi]
+pub fn darker(filename: String, saturation: u8) {
+ let img = image::open(filename.clone()).expect("File not found!");
+ let (w, h) = img.dimensions();
+ let mut output = ImageBuffer::new(w, h);
+
+ for (x, y, pixel) in img.pixels() {
+ output.put_pixel(x, y, pixel.map(|p| p.saturating_sub(saturation)));
+ }
+
+ output.save(filename).unwrap();
+}
+#[napi] attribute is a marker that the function should be used in JS/TS code.
+The function above takes the filename and saturation, reads the file, applies the saturation, and rewrites the file.
+Let’s rebuild…
+yarn build
+As a result, index.js and index.d.ts should be updated.
+Copy this picture to the root of the project.
+Also, let’s change simple-test.js
+const { darker } = require("./index");
+
+darker("./cube.png", 50);
+It’s time to run it.
+node simple-test
+Or run the commands below if you want to reproduce all the steps from the start.
+git clone git@github.com:buchslava/napi-rs-images.git
+cd napi-rs-images
+yarn
+yarn build
+node simple-test
+Look at the following changes
+Our Rust part is ready and it’s time to implement a web application that allows us to upload/desaturate the file and show it after.
+If you want to try the application immediately you can play with napi-rs images. Otherwise, please read my explanations below.
+First we need to install expected NodeJS dependencies.
+yarn add ejs
+yarn add express
+yarn add express-ejs-layouts
+yarn add express-fileupload
+yarn add uuid
+Make storage folder under the root of the project and add it to ./.gitignore.
+Add the ./server.js to the root of the project.
+const fs = require("fs");
+const path = require("path");
+
+const express = require("express");
+const ejsLayouts = require("express-ejs-layouts");
+const fileUpload = require("express-fileupload");
+const uuidv4 = require("uuid").v4;
+
+const { darker } = require("./index");
+
+const STORAGE_DIR = "storage";
+
+const app = express();
+
+app.use(fileUpload());
+app.set("view engine", "ejs");
+app.use(ejsLayouts);
+app.use("/storage", express.static(path.join(__dirname, STORAGE_DIR)));
+app.use(express.urlencoded({ extended: true }));
+
+app.get("/", async (req, res) => {
+ let files = await fs.promises.readdir(path.join(__dirname, STORAGE_DIR));
+ files = files
+ .map((fileName) => ({
+ name: fileName,
+ time: fs
+ .statSync(path.join(__dirname, STORAGE_DIR, fileName))
+ .mtime.getTime(),
+ }))
+ .sort((a, b) => a.time - b.time)
+ .map((v) => v.name);
+ return res.render("upload", { files: files.reverse() });
+});
+
+app.post("/uploads", function (req, res) {
+ const file = req.files.upload;
+ const extname = path.extname(file.name);
+ const uuid = uuidv4();
+ const filePath = path.join(__dirname, STORAGE_DIR, `${uuid}${extname}`);
+
+ file.mv(filePath, (err) => {
+ if (err) {
+ return res.status(500).send(err);
+ }
+ try {
+ darker(filePath, +req.body.saturation);
+ } catch (e) {
+ return res.status(500).send(e);
+ }
+ res.redirect("/");
+ });
+});
+
+app.listen(3000);
+Also, add "start": "node server", to the scripts section in ./package.json.
+I don’t want to explain many of the solutions above because it’s obvious for a NodeJS folk. I just want to pay attention to the points below.
+There are two endpoints: / and /uploads.
+/ provides us with an upload form and a list of the uploaded and desaturated images.
+/uploads uploads and desaturates an uploaded image and redirects to /.
+Also, please look at image desaturation
+try {
+ darker(filePath, +req.body.saturation);
+} catch (e) {
+ return res.status(500).send(e);
+}
+and the fact that we take the Saturation Value from the request +req.body.saturation as a number, and
+let files = await fs.promises.readdir(path.join(__dirname, STORAGE_DIR));
+files = files
+ .map((fileName) => ({
+ name: fileName,
+ time: fs
+ .statSync(path.join(__dirname, STORAGE_DIR, fileName))
+ .mtime.getTime(),
+ }))
+ .sort((a, b) => a.time - b.time)
+ .map((v) => v.name);
+return res.render("upload", { files: files.reverse() });
+where STORAGE_DIR is storage (see above) and we pass a sorted list of the uploaded files to the related EJS template.
+Related EJS templates are below.
+<!DOCTYPE html>
+<html lang="en">
+ <head>
+ <meta charset="utf-8" />
+ <meta name="viewport" content="width=device-width, initial-scale=1" />
+
+ <link
+ href="https://cdn.jsdelivr.net/npm/bootstrap@5.1.3/dist/css/bootstrap.min.css"
+ rel="stylesheet"
+ integrity="sha384-1BmE4kWBq78iYhFldvKuhfTAU6auU8tT94WrHftjDbrCEXSU1oBoqyl2QvZ6jIW3"
+ crossorigin="anonymous"
+ />
+ <title>Uploads</title>
+ </head>
+ <body>
+ <%- body %>
+ </body>
+</html>
+<div class="container">
+ <form
+ class="w-50 mx-auto my-3"
+ action="/uploads"
+ method="post"
+ enctype="multipart/form-data"
+ >
+ <div class="mb-3">
+ <input class="form-control" type="file" name="upload" required />
+ </div>
+ <div class="w-50 d-flex form-outline align-middle">
+ <label class="form-label text-nowrap pr-3" for="saturation"
+ >% saturation </label
+ >
+ <input
+ name="saturation"
+ value="65"
+ type="number"
+ id="saturation"
+ class="form-control"
+ />
+ </div>
+ <button class="btn btn-primary">Upload</button>
+ </form>
+
+ <div class="container">
+ <% for (const file of files){ %>
+ <div class="row mb-3">
+ <img src="/storage/<%= file %>" class="card-img-top" alt="Image" />
+ </div>
+ <% } %>
+ </div>
+</div>
+It’s time to test the whole solution.
+yarn start
+and try http://localhost:3000
+Finally, let’s upload a couple of images.
+I guess you will satisfy your curiosity about performance if you upload and process bigger images.
+In conclusion, I want to mention a fact from here.
+"One nice feature is that this crate allows you to build add-ons purely with the Rust/JavaScript toolchain and without involving node-gyp."
+That’s like music to the ears of Node Folks.
+Manage federated applications and modules, control explicit versions of any previous build or commit per consuming app, track the performance and many more!
+Become an early bird subscriber for Medusa – a dashboard for federated apps.
+Intro to Module Federation with Medusa: First look
+Webpack Module Federation: going in-depth with Zack Jackson
+Federated Modules Dashboard: the preview video
+A lot has happened since we introduced the qwik-nx, an official plugin for the Qwik framework. As Qwik breaks free by reaching the v1, we’re happy to announce the release of the stable version of the Nx integration for it.
+Qwik is referred to as a next-generation framework because of its revolutionary new approaches to performance optimization, among which:
+JavaScript Streaming - an ability to load and execute only the bare minimum of the code as Qwik analyses and splits your code into chunks up to a function level. Those chunks are prefetched in a separate thread, similar to "buffering" in video streaming. The prefetched code only executes upon user action;
+resumability - a new rendering paradigm to bring you instantly interactive apps by serializing/deserializing your app as it goes from SSR to client without the need for hydration;
+In practice this means that you get an app with O(1) size, as its initial bundle is constant at about 1kb no matter the application complexity.
+This is extremely beneficial for enterprise companies, that usually want to bring an enormous amount of functionality to their users without any performance impact.
+While Qwik helps to scale the application’s functionality expressed via code without effort, Nx is there to make sure your codebase can be managed at any scale. And here’s where qwik-nx comes into play. It provides the same experience of the generation and execution of Qwik applications in an Nx workspace that fits the standards of other Nx projects.
+In the previous article, we have talked about how you get started by initializing your Nx repository with Qwik.
+Recap: you can generate a new workspace by running:
+npx create-nx-workspace@latest org-workspace --preset=qwik-nx
+Or add a new Qwik application to an existing workspace by running:
+npm install -D qwik-nx
+and then
+nx generate qwik-nx:app
+Today let’s talk about what new opportunities does it offer:
+If you stick to using Qwik with Nx from its first days, you might have noticed that importing Qwik components from libraries does not work out of the box as all those dependencies should be configured as vendorRoots for qwikVite plugin in order for Qwik Optimizer to actually include and process them.
+The good news is you don’t have to do it manually any more: qwikNxVite plugin will by default analyze the Nx dependency graph and put all libraries your app uses as vendorRoots for the Qwik Optimizer to consume.
+// vite.config.ts
+
+import { qwikNxVite } from 'qwik-nx/plugins';
+
+export default defineConfig({
+ plugins: [
+ qwikNxVite(), // <= that's all you need!
+ qwikCity(),
+ qwikVite(),
+ tsconfigPaths({ root: '../../' }),
+ ],
+ ...
+});
+And this behavior can be fine-tuned even further to make sure you’re including only what’s intended. As an example, you can mark all Qwik-specific libraries with a specific tag and filter your vendorRoots with it. Let’s see how you can leverage the usage of "framework:qwik" tag.
+1) Mark libs you need with the respective tag:
+// some lib's project.json
+{
+...
+"tags": ["type:ui", "scope:products", "framework:qwik"]
+}
+2) Configure the plugin
+// app's vite.config.ts
+qwikNxVite({
+ // filter out all projects that does not have this tag
+ // use "tags", "name", "path" regex
+ // or even "customFilter" to achieve desired filtering logic
+ includeProjects: { tags: ["framework:qwik"] }
+ debug: true // <= put this to see what projects are included
+})
+This configuration will make the qwikNxVite plugin load only libraries that have the respective tag AND are recognized by Nx as your app’s dependency.
+You can configure the Nx Console to always put the tag you want as a default value for the tags field for the qwik-nx:library generator. This can be achieved by setting that default value in the build options of nx.json
+{
+ "generators": {
+ "qwik-nx:library": {
+ "tags": "framework:qwik"
+ }
+ }
+}
++
One of the biggest benefits of using Nx to manage the codebase is their seamless migration process: with a single command nx migrate latest Nx CLI will analyze the workspace and provide the set of migrations written by Nx team to update versions of your dependencies and mutate all necessary bits of code. This is a great capability that allows large codebases to always stay up to date without investing weeks of refactoring.
+qwik-nx aims to provide the same feature for its users to make sure your Qwik apps are updated. This means you don’t have to update the app’s dependencies manually anymore, just run +nx migrate qwik-nx@latest and you are done: it will update all breaking changes and bump versions of Qwik packages.
+Qwik offers a great capability to qwikify$ React components - reuse existing React components and libraries within Qwik applications. Not only does this open the world of using existing UI libraries (Material UI, Threejs, React Spring, etc.) and utilities that were developed for React over time within Qwik applications, but also this turns out to be a powerful migration strategy, as you can migrate your existing React applications chunk by chunk, preserving large parts of functionality in the old syntax without a need to rewrite everything at once.
+As React and Qwik components can not be mixed in the same file, if you check your project right after adding React integration to a plain Qwik app, you will see a new folder src/integrations/react/, and it’s recommended to place all React components there.
+qwik-nx gives you more control over structuring your "qwikified" react code by defining 2 approaches for you:
+use a react-in-app generator, that behaves in the same manner as native Qwik integration. It will add integrations/react folder in the existing Qwik app
+react-library generator, which will create a separate library to keep qwikified components in it. This means you can now structure your React code as you need and still be able to use it within Qwik apps.
+In the Qwik repository there are open PRs for Angular, Vue and Svelte integrations. As soon as they’re available, you will also be able to generate separate libraries for components of those frameworks.
+qwik-nx now supports adding storybooks for your apps and libs! We get you covered in all scenarios:
+run nx g qwik-nx:storybook-configuration to add storybook to your existing libs and apps
+generate a new library with a preconfigured storybook and stories for newly generated components by running nx g qwik-nx:library mylib --storybookConfiguration
+add a new component along with a story for it by running nx g qwik-nx:component --generateStories
+Qwik offers a variety of ways to deploy your app with its CLI, such as Netlify, Cloudflare Pages, Azure, Vercel, etc. With qwik-nx, we already provide support for Cloudflare Pages with Netlify and other integrations on their way.
+To try it out, all you have to do is run nx g qwik-nx:cloudflare-pages-integration myapp. This will create a Cloudflare adapter and add necessary targets to preview and deploy your website with Wrangler (Cloudflare CLI tool).
+As you probably know, Qwik builds your code twice, running a client build first and an SSR one after that. In the Nx world, each step of the process is supposed to be a separate target of the application, so that you can keep full control of what you’re building.
+To achieve this, we’re exposing real build steps and wrapping them into our custom build executor.
+{
+ "targets": [
+ "build": {
+ "executor": "qwik-nx:build",
+ "options": {
+ "runSequence": [
+ "myapp:build.client",
+ "myapp:build.ssr"
+ ],
+ "outputPath": "dist/apps/myapp"
+ }
+ },
+ "build.client": {
+ "executor": "@nrwl/vite:build",
+ "options": {
+ "outputPath": "dist/apps/myapp",
+ "configFile": "apps/myapp/vite.config.ts"
+ }
+ },
+ "build.ssr": {
+ "executor": "@nrwl/vite:build",
+ "options": {
+ "outputPath": "dist/apps/myapp"
+ }
+ }
+ ]
+}
+The cool thing with this build executor is that you can customize and add additional targets to the build process if that’s needed. For example, you can add "myapp:i18n" target to the runSequence property in order to process your translations after building the app.
+Oh, and keep in mind that build executor has another important purpose: it runs type checks for your app before building it!
+Qwik is able to render multiple application instances within the page as it can be attached to an HTML element, which becomes the root of the application. Inspired by this awesome article, we’ve added micro-frontend support to the Qwik apps. As of now this functionality is in beta.
+The intent is to bring the same level of convenience as what you get with Nx’s tooling to generate and run Module Federation setup with React and Angular.
+qwik-nx provides all necessary generators and executors to use it with ease.
+To get started, run the following:
+nx g qwik-nx:host shell --remotes=products,settings, which will scaffold the host application and 2 remotes for it
+nx g qwik-nx:remote myanotherremote --host=shell to add a new remote application to your existing setup
+That is it! You can now run either nx serve shell or nx preview shell and see both host and remotes built and served together. This is because qwik-nx's executors for serve and preview are able to process all remotes for you with 0 configuration.
+The package already provides a solid way of working with Qwik applications within Nx workspaces. Our main goal is to eventually reach feature parity with Qwik CLI.
+So feel free to try it out and let us know how it works out for you!
+You are welcome to join the Qwikifiers and Qwik Discord servers to keep track of any updates or find any help you need.
+We are also looking forward to see any new contributions to the package itself.
+Last but not least, we’d like to extend a heartfelt thank you to Ryan Hutchison and ASI Central for their unwavering support of our project.
+At Valor Software, we are passionate about staying at the forefront of technology and are integration partners with the creators of Qwik, Builder.io. If you have any questions or need assistance with your project, don’t hesitate to contact us at sales@valor-software.com
+"CodePush is an App Center cloud service that enables React Native developers to deploy mobile app updates directly to their users’ devices. It works by acting as a central repository that developers can publish certain updates to (for example, JS, HTML, CSS and image changes), and apps can query for updates from (using the provided client SDKs). This allows you to have a more deterministic and direct engagement model with your end users while addressing bugs, adding small features that don’t require you to rebuild a binary, or redistributing it through any public app stores"
+That’s how Microsoft described CodePush in their documentation.
+CodePush is not a new equivalent of TestFlight or Firebase Distribution. It’s a tool for delivering your builds to the end user not just beta delivery tool. In easy words, CodePush is an instrument that gives us the ability to replace JS bundle OTA and avoid the bottleneck of releasing via App Store and Play market. With CodePush you can achieve more web-like release behavior for mobile apps.
+Why did I come up with this tool? We needed to make targeted releases and reduce the time of receiving updates. So we gave it a try because we were already users of AppCenter for managing beta releases.
+CodePush is located inside AppCenter Dashboard.
+There you can manage your builds.
+And configure your deployment keys.
+I don’t want to waste a lot of time on detailed instructions for setting up CodePush SDK, so we have documentation with all steps. So let’s move on to more interesting things.
+Firstly, we need to specify the strategy for updating rollout targets and how we will force users to get these updates.
+By rollout targets we mean that we can decide who and how can get our updates, and we can get there using deployment keys.
+CodePush gives us a flexible tool for targeting. +By default, we have 2 deployment keys, one for Production, and the other for staging. It’s the easiest option.
+But we can build a fully custom system over these keys. You can create the deployment key for each situation that you need. +Example: +In your app, you have 3 types of users: admins, managers, and customers. With admins and managers, you have a direct channel of communication. So you’re creating 3 keys:
+Save them locally or put them into your DB, CMS, etc. Voilà! Now you can start collecting feedback if everything is okay with your app without touching the most fragile group of users.
+How will it look in your app? Easy!
+You just need to provide the key that you saved for the user to the codePush library using the sync method
+Ok, we decided how we will send updates to users, but the next question is: how do they get them?
+Initially you will have a similar config in your entry file
+Here we also have interesting things to play with. +We have 2 parameters that we can use to achieve the result: checkFrequency and installMode.
+{
+ checkFrequency: codePush.CheckFrequency.ON_APP_START,
+ installMode: codePush.InstallMode.ON_NEXT_RESTART,
+}
+According to docs:
+Your app will automatically download available updates, and apply them the next time the app restarts (like the OS or end user killed it, or the device was restarted). This way, the entire update experience is “silent” to the end user, since they don’t see any update prompt and/or “synthetic” app restarts. +The main problem with this strategy is that the user may not close the application, and you can’t predict when they will get your updates.
+{
+ checkFrequency: codePush.CheckFrequency.ON_APP_RESUME,
+ installMode: codePush.InstallMode.ON_NEXT_RESUME,
+ minimumBackgroundDuration: 30*60 // 30 minutes
+}
+Here with minimumBackgroundDuration parameter we specify how long the application will wait until it will be considered as restarted. So we avoid the case when users don’t close their apps for years. But we are still not sure that users have the latest versions. Because they need to put their app in the background for the time specified in the minimumBackgroundDuration parameter.
+{
+ checkFrequency: codePush.CheckFrequency.ON_APP_RESUME,
+ installMode: codePush.InstallMode.IMMEDIATE,
+}
+The main difference is that each time the app is resumed it will be updated. The concern is that it could look clunky because the application will restart before the user’s eyes.
+{
+ checkFrequency: codePush.CheckFrequency.MANUAL,
+}
+Last but not least is manual updates, here we can trigger updates programmatically when and how we want. We just need to trigger codePush.sync method.
+Example with updateDialog and production/staging keys:
+For example, I’ll choose Manual updates
+const codePushOptions = {
+ checkFrequency: codePush.CheckFrequency.MANUAL,
+};
+
+const AppWithCodePush = codePush(codePushOptions)(App);
+
+AppRegistry.registerComponent('CodePushPoc', () => AppWithCodePush);
+And our app will look like
+Using a switcher we will choose our environment. +Then we need to push our first bundle to the CodePush using the AppCenter CLI
+appcenter codepush release-react -a YOUR_ORG_NAME/YOUR_APP_NAME -d Prod
+And you’ll get your build online:
+We can modify our code somehow and push it to the stage environment.
+I’ll create a new branch with the name feature/new-bg-color and let’s change backgroundColor to #6b5b95 and push it to the Stage environment
+appcenter codepush release-react -a YOUR_ORG_NAME/YOUR_APP_NAME -d Stage
+We will be able to see this:
+CodePush is a great tool that helps you to improve and speed up your releases but it requires you to be more focused cause the chance of mistakes increases too. You can check the example app in the repo (note it is configured only for Android).
+While working on a NativeScript Angular app with millions of downloads across platforms, I faced a tricky problem: I needed to generate an image that the user could share. Usually this can be done quite easily if this view is visible in your application, where you could just render it to an image (in fact, it has been done before). The difficult part here was that this view did not show anywhere in the app, and even had special layout constraints.
+Taking a screenshot of a view is an easy task.
+On Android, it’s a simple case of creating a bitmap, attaching it to a canvas, and then drawing the view directly on that canvas:
+export function renderToImageSource(hostView: View): ImageSource {
+ const bitmap = android.graphics.Bitmap.createBitmap(hostView.android.getWidth(), hostView.android.getHeight(), android.graphics.Bitmap.Config.ARGB_8888);
+ const canvas = new android.graphics.Canvas(bitmap);
+ // ensure we start with a blank transparent canvas
+ canvas.drawARGB(0, 0, 0, 0);
+ hostView.android.draw(canvas);
+ return new ImageSource(bitmap);
+}
+On the iOS side, we have a very similar concept. We begin the image context, and then we render the view in that context:
+export function renderToImageSource(hostView: View): ImageSource {
+ UIGraphicsBeginImageContextWithOptions(CGSizeMake(hostView.ios.frame.size.width, hostView.ios.frame.size.height), false, Screen.mainScreen.scale);
+ (hostView.ios as UIView).layer.renderInContext(UIGraphicsGetCurrentContext());
+ const image = UIGraphicsGetImageFromCurrentImageContext();
+ UIGraphicsEndImageContext();
+ return new ImageSource(image);
+}
+There it is! Taking a screenshot of any NativeScript view with a couple of lines of code!
+Now let’s take it one step further. Let’s use some clever NativeScript magic and create our layout completely detached from the native view tree:
+export function loadViewInBackground(view: View): void {
+ // get the context (android only)
+ const context = isAndroid ? Utils.android.getApplicationContext() : {};
+ // now create the native view and setup the styles (CSS) as if it were a root view
+ view._setupAsRootView(context);
+ // load the view to apply all the native properties
+ view.callLoaded();
+}
+That should do it! Now let’s just call that function and… oh…
+Of course! This view has no size! So we need to measure and layout it. That’s simple enough:
+export function measureAndLayout(hostView: View, width?: number, height?: number) {
+ const dpWidth = width ? Utils.layout.toDevicePixels(width) : 0;
+ const dpHeight = height ? Utils.layout.toDevicePixels(height) : 0;
+ const infinity = Utils.layout.makeMeasureSpec(0, Utils.layout.UNSPECIFIED);
+ hostView.measure(width ? Utils.layout.makeMeasureSpec(dpWidth, Utils.layout.EXACTLY) : infinity, height ? Utils.layout.makeMeasureSpec(dpHeight, Utils.layout.EXACTLY) : infinity);
+
+ hostView.layout(0, 0, hostView.getMeasuredWidth(), hostView.getMeasuredHeight());
+}
+Now this view should render exactly at the width and height that I require. Let’s give it a try:
+It worked! Turns out it wasn’t as difficult as I thought. Now that we’re ready to go, let’s add the styling. Let’s keep the text intact, but add some styling. We need some border-radius and some margins.
+.view-shot {
+ border-radius: 50%;
+ border-width: 1;
+ border-color: red;
+ margin: 10;
+}
+Now run that through our render and…
+Where did my margins go? Well, it turns out that, on both platforms, the parent layout is responsible for the children’s positioning, and margins are just some extra positioning information given to the parent. Another quick fix then, just wrap the view with another layout:
+export function loadViewInBackground(view: View): View {
+ // get the context (android only)
+ const context = isAndroid ? Utils.android.getApplicationContext() : {};
+ // create a host view to ensure we're preserving margins
+ const hostView = new GridLayout();
+ hostView.addChild(view);
+ // now create the native view and setup the styles (CSS) as if it were a root view
+ hostView._setupAsRootView(context);
+ // load the view to apply all the native properties
+ hostView.callLoaded();
+ return hostView;
+}
+And the result:
+Success! We can now keep adding the remainder, like an image. The image has to be downloaded, so let’s add a delay between creating the view and screenshotting it (we can cache it later). And… oh no, not again.
+After digging through the native source code I realized that on Android a lot of views (like an image) will only fully render when they’re attached to the window, so how do we attach it to the view hierarchy without showing it and without affecting the layout at all?
+The main function of a ViewGroup is to lay out the views in a particular way. So first, let’s create a view that will not do any layout:
+@NativeClass
+class DummyViewGroup extends android.view.ViewGroup {
+ constructor(context: android.content.Context) {
+ super(context);
+ return global.__native(this);
+ }
+ public onMeasure(): void {
+ this.setMeasuredDimension(0, 0);
+ }
+ public onLayout(): void {
+ //
+ }
+}
+class ContentViewDummy extends ContentView {
+ createNativeView() {
+ return new DummyViewGroup(this._context);
+ }
+}
+Now we just need to make sure that its visibility is set to collapse and use a very convenient method from the AppCompatActivity (addContentView) to add the view to the root of the activity, essentially adding it to the window but completely invisible.
+export function loadViewInBackground(view: View) {
+ const hiddenHost = new ContentViewDummy();
+ const hostView = new GridLayout(); // use a host view to ensure margins are respected
+ hiddenHost.content = hostView;
+ hiddenHost.visibility = 'collapse';
+ hostView.addChild(view);
+ hiddenHost._setupAsRootView(Utils.android.getApplicationContext());
+ hiddenHost.callLoaded();
+
+ Application.android.startActivity.addContentView(hiddenHost.android, new android.view.ViewGroup.LayoutParams(0, 0));
+
+ return {
+ hiddenHost,
+ hostView
+ };
+}
+And we’re done!
+So far we have only dealt with NativeScript views, but what we really care about is how we generate these views from Angular components and templates. So here’s how:
+import { ComponentRef, inject, Injectable, Injector, TemplateRef, Type, ViewContainerRef } from '@angular/core';
+
+import { generateNativeScriptView, isDetachedElement, isInvisibleNode, NgView, NgViewRef } from '@nativescript/angular';
+import { ContentView, ImageSource, View, ViewBase } from '@nativescript/core';
+import { disposeBackgroundView, loadViewInBackground, measureAndLayout, renderToImageSource } from '@valor/nativescript-view-shot';
+
+export interface DrawableOptions<T = unknown> {
+ /**
+ * target width of the view and image, in dip. If not specified, the measured width of the view will be used.
+ */
+ width?: number;
+ /**
+ * target height of the view and image, in dip. If not specified, the measured height of the view will be used.
+ */
+ height?: number;
+ /**
+ * how much should we delay the rendering of the view into the image.
+ * This is useful if you want to wait for an image to load before rendering the view.
+ * If using a function, it will be called with the NgViewRef as the first argument.
+ * The NgViewRef can be used to get the EmbeddedViewRef/ComponentRef and the NativeScript views.
+ * This is useful as you can fire an event in your views when the view is ready, and then complete
+ * the promise to finish rendering to image.
+ */
+ delay?: number | ((viewRef: NgViewRef<T>) => Promise<void>);
+ /**
+ * The logical host of the view. This is used to specify where in the DOM this view should lie.
+ * The practical use of this is if you want the view to inherit CSS styles from a parent.
+ * If this is not specified, the view will be handled as a root view,
+ * meaning no ancestor styles will be applied, similar to dropping the view in app.component.html
+ */
+ logicalHost?: ViewBase | ViewContainerRef;
+}
+
+@Injectable({
+ providedIn: 'root',
+})
+export class ViewShotService {
+ private myInjector = inject(Injector);
+ async captureInBackground<T>(type: Type<T> | TemplateRef<T>, { width, height, delay, logicalHost }: DrawableOptions<T> = {}): Promise<ImageSource> {
+ // use @nativescript/angular helper to create a view
+ const ngView = generateNativeScriptView(type, {
+ injector: logicalHost instanceof ViewContainerRef ? logicalHost.injector : this.myInjector,
+ keepNativeViewAttached: true,
+ });
+ // detect changes on the component
+ if (ngView.ref instanceof ComponentRef) {
+ ngView.ref.changeDetectorRef.detectChanges();
+ } else {
+ ngView.ref.detectChanges();
+ }
+ // currently generateNativeScriptView will generate the view wrapped in a ContentView
+ // this is a minor bug that should be fixed in a future version on @nativescript/angular
+ // so let's add a failsafe here to remove the parent if it exists
+ if (ngView.view.parent) {
+ if (ngView.view.parent instanceof ContentView) {
+ ngView.view.parent.content = null;
+ } else {
+ ngView.view.parent._removeView(ngView.view);
+ }
+ }
+ // use the method that loads a view in the background
+ const drawableViews = loadViewInBackground(ngView.view, logicalHost);
+ const { hostView } = drawableViews;
+
+ // do the measuring of the hostView
+ measureAndLayout(hostView, width, height);
+
+ // this delay is either a function or time in ms
+ // which is useful for letting async views load or animate
+ if (typeof delay === 'function' || (typeof delay === 'number' && delay >= 0)) {
+ if (typeof delay === 'number') {
+ await new Promise<void>((resolve) =>
+ setTimeout(() => {
+ resolve();
+ }, delay)
+ );
+ } else {
+ await delay(ngView);
+ if (ngView.ref instanceof ComponentRef) {
+ ngView.ref.changeDetectorRef.detectChanges();
+ } else {
+ ngView.ref.detectChanges();
+ }
+ }
+ // do a final measure after the last changes
+ measureAndLayout(hostView, width, height);
+ }
+
+ // call the render function
+ const result = renderToImageSource(hostView);
+
+ // dispose views and component
+ disposeBackgroundView(drawableViews);
+ ngView.ref.destroy();
+ return result;
+ }
+
+ // unchanged from the original implementation
+ captureRenderedView(view: View) {
+ return renderToImageSource(view);
+ }
+}
+Hopefully this gave you an insight into how the native platforms display their views and how NativeScript can be used in advanced view hierarchy composition.
+The NativeScript plugin has been released as @valor/nativescript-view-shot and you can check its source code in our shared plugin workspace.
+You can now enjoy creating views in the background for either showing, saving or sharing them in social media, like the following mockup:
+Official NativeScript professional support partner, Valor is actively contributing to the NativeScript ecosystem, providing enterprise support, consulting, and team augmentation. Valor Software additionally helps with all aspects of the SDLC, web, backend and mobile. Book a call today or email us at sales@valor-software.com if you need help.
+The Nx ecosystem is excellent because, on the one hand, it allows different teams to work under the same monorepo and build robust enterprise solutions. On the other hand, the basic Nx functionality is relatively easy. That’s why Nx-based solutions have become more popular lately. Consequently, a growing number of monorepos include technologies such as React, Angular, NestJS, and even Golang. Moreover, these libraries and frameworks are not alone under the monorepos. They often use a lot of third-party libraries and custom modules. One of the most painful topics regarding software development, including Nx, is managing dependency updates. Especially dependencies with breaking changes that need to be resolved in order to get the application back into a working state. This article proposes one possible solution. It is not meant as a final solution or a source of truth. These are only initial thoughts about the topic.
+Nx official documentation tells us regarding migrations the following.
+"Not only does nx migrate update you to the latest version of Nx, but it also updates the versions of dependencies that we support and test, such as Jest and Cypress. You can also use the migrate command to update any Nx plugin."
+The Nx team did a great job because you can update the whole solution as efficiently as possible. That’s why I recommend the following reading. Many packages support it. But "many" is not equal to "all." Let’s look at the following case.
+There is a demo monorepo. It contains a React Application. Also, it includes a common lib.
+Let’s dig into the demo functionality and code specific.
+The client React app is quite simple. Let’s run it.
+git clone git@github.com:buchslava/nx-custom-migration-demo.git
+npm i
+npx nx run client:serve
+The behavior of a couple of forms above is similar. They demonstrate the sum of two numbers. Related code is placed here. Let’s look at the code.
+import { sumProxy } from '@nx-custom-migration-demo/common-lib';
+import { useState } from 'react';
+import { deprecatedSum } from 'try-lib';
+
+export function App() {
+ const [a, setA] = useState<number>(0);
+ const [b, setB] = useState<number>(0);
+ const [c, setC] = useState<number>();
+
+ const [d, setD] = useState<number>(0);
+ const [e, setE] = useState<number>(0);
+ const [f, setF] = useState<number>();
+
+ return (
+ <>
+ <div>
+ <h2>Using internal lib</h2>
+ <div>
+ <input
+ value={a}
+ onChange={(e) => {
+ setA(+e.target.value);
+ }}
+ />
+ +
+ <input
+ value={b}
+ onChange={(e) => {
+ setB(+e.target.value);
+ }}
+ />
+ <button
+ onClick={() => {
+ setC(sumProxy(a, b));
+ }}
+ >
+ is
+ </button>
+ <span>{c}</span>
+ </div>
+ </div>
+
+ <div>
+ <h2>Using external lib</h2>
+ <div>
+ <input
+ value={d}
+ onChange={(e) => {
+ setD(+e.target.value);
+ }}
+ />
+ +
+ <input
+ value={e}
+ onChange={(e) => {
+ setE(+e.target.value);
+ }}
+ />
+ <button
+ onClick={() => {
+ setF(deprecatedSum(d, e));
+ }}
+ >
+ is
+ </button>
+ <span>{f}</span>
+ </div>
+ </div>
+ </>
+ );
+}
+
+export default App;
+Pay attention to the following points.
+<button
+ onClick={() => {
+ setC(sumProxy(a, b));
+ }}
+>
+ is
+</button>
+The code above describes the result calculation for the first form. We take sumProxy from the internal library @nx-custom-migration-demo/common-lib.
+<button
+ onClick={() => {
+ setF(deprecatedSum(d, e));
+ }}
+>
+ is
+</button>
+The code above describes the result calculation for the second form. We take deprecatedSum from the external library try-lib. +If we look at package.json we can find the library.
+"try-lib": "^1.0.1",
+The library is placed here.
+import { deprecatedSum } from "try-lib";
+
+export function sumProxy(a: number, b: number): number {
+ return deprecatedSum(a, b);
+}
+It looks similar to the calculation for the second form on the client because of deprecatedSum from try-lib.
+It’s time to look at the external library. You can find the library on GitHub and npm. +There are a couple of versions.
+This version is based on the following code.
+export function deprecatedSum(a: number, b: number): number {
+ return a + b;
+}
+The main feature of this version is that this version contains a breaking change. The breaking change is that deprecatedSum has changed to sum.
+export function sum(a: number, b: number): number {
+ return a + b;
+}
+Pay attention that now we use try-lib@1.0.1. A couple of important places look like the following.
+Let’s change it to version 2.0.0 and run npm i.
+It’s easy to predict the result.
+We have a broken solution because try-lib has a breaking change.
+Of course, we can fix the breaking changes manually and forget this story. But this approach is suitable mainly for Ad hoc or for individual developers. We need to count the following factors when discussing teams or enterprise solutions.
+We don’t know when we should apply the changes. Let’s imagine a team has decided to bump the library version. One of the developers provides a new branch, including fixes. But the manager postponed this branch merging, and the team had no idea when the team would continue with this task. What happens there? The team wasted time because the branch was outdated, and in most cases reasonable to forget the branch and repeat breaking changes and fixing again. Another option is Version Control System (VCS) conflict resolution. But this approach is the worst because we risk damaging the solution. Of course, it depends on the number of new modifications and breaking changes.
+Of course, the VCS (say, Git) can log the changes. Despite this, information could be lost due to rebasing. Perhaps we must invent a new approach to log the changes more efficiently.
+As I previously told you, Nx has a powerful migration functionality that allows upgrading Nx with other dependencies it supports. But let’s imagine the situation when we don’t need to upgrade the Nx version. Still, at the same time, we need to upgrade an individual dependency by automatically breaking changes and fixing them exactly as Nx migration does.
+Analyzing the above, let’s gather the requirements for the task.
+We need to upgrade an individual dependency at any time. That’s why the following approach makes sense. A developer or a team doesn’t work with the result code; on the contrary, they prepare an algorithm (functionality) that transforms the solution according to the expected dependency version. It allows the team to apply the changes at any time when needed. On the one hand, it makes the project more expensive. On the other hand, it decreases many risks. We have a predictable flow because we know what happens (the result of applying) and how it happens (the algorithm/functionality). Also, there is one more important thing. If something wrong happens, we can roll back the changes and repeat them easily because we don’t need to provide manual jobs.
We need to deal with different versions of the package.
The functionality above should be Nx-friendly.
The most important thing here is that the functionality above could be shared between Nx society and used as a part of a future Nx plugin. More information here.
+When I was thinking about the implementation thoughts above, I faced with Using Nx Workspace generators to scaffold new blog posts by Juri Strumpflohner. I recommend this article because I found this helpful approach to resolving my task there. Briefly, my plan is about a new workspace generator creation. The workspace generator should get the related package name and modify the source code afterward.
+It’s time to dig into the solution.
+First, I created the new workspace generator with the following command:
+npx nx g @nx/workspace:workspace-generator code-transformer
+This creates a new folder in tools/generators/code-transformer with an index.ts and a schema.json file.
+This file describes the metadata of our generator. It allows Nx to discover the generator and provides metadata such as the possible argument the generator takes for validation purposes or dynamically rendering a UI like Nx Console does.
+In our case, we have only one input parameter for the generator, and we need to pass a related dependency name.
+I opened the tools/generators/code-transformer/schema.json and adjusted the schema to match our requirements.
+{
+ "$schema": "http://json-schema.org/schema",
+ "cli": "nx",
+ "$id": "code-transformer",
+ "type": "object",
+ "properties": {
+ "package": {
+ "type": "string",
+ "description": "Package",
+ "$default": {
+ "$source": "argv",
+ "index": 0
+ }
+ }
+ },
+ "required": ["package"]
+}
+Next, I opened the tools/generators/code-transformer/index.ts and put my solution there. +Please, read the comments in the code. It will help you to understand the solution.
+import { Tree } from '@nrwl/devkit';
+import * as fs from 'fs';
+import * as path from 'path';
+import * as semver from 'semver';
+import * as util from 'util';
+import { Project } from 'ts-morph';
+import compile from './compiler';
+
+const readFile = util.promisify(fs.readFile);
+
+export default async function (tree: Tree, schema: any) {
+ // Get "package.json"
+ const packageJson: any = JSON.parse(
+ (await readFile('package.json')).toString()
+ );
+ // "schema.package" contains the related dependency name
+ // if it does not exist in "package.json" then
+ // throw an error and stop the generator
+ if (!packageJson.dependencies[schema.package]) {
+ throw Error(`Dependency "${schema.package}" is not found in package.json`);
+ }
+ // Get an existing version of the related dependency
+ const existingVersion = packageJson.dependencies[schema.package].replace(
+ /^[\D]{1}/,
+ ''
+ );
+ // Get `updates.json` config
+ const updatesJson: any = JSON.parse(
+ (await readFile('tools/updates/updates.json')).toString()
+ );
+ const activities: string[] = [];
+ // Iterate over `updates` array
+ for (const record of updatesJson.updates) {
+ // if the existing (bumped) version from the package is more than "changes"-based and the package is expected
+ // add the related function to activities
+ if (schema.package === record.package && semver.gte(existingVersion, record.version)) {
+ activities.push(path.resolve(process.cwd(), record.implementation));
+ }
+ }
+ // Compile the related TS files
+ compile(activities, {});
+
+ // In this example, we use only "ts-morph" for the source code transformation
+ const project = new Project();
+ // Pass expected paths of source code
+ const sources = [
+ 'libs/**/*.ts',
+ 'libs/**/*.tsx',
+ 'apps/client/**/*.ts',
+ 'apps/client/**/*.tsx',
+ ];
+
+ for (const activity of activities) {
+ // Get related transformer-function
+ const fn = require(activity).default;
+ // Run it
+ fn(project, sources);
+ }
+ // Save the changes
+ await project.save();
+}
+You can read explained source code above here. +It’s time to dig into the "Updates" concept. There is the following structure:
+File update/update.json represents a configuration regarding all updates.
+The updates value is an array of records.
Each record contains the following data: package (dependency name), description (change), and version. But the most important data here is implementation. It represents a file name containing the transformation’s code.
+In our case, the try-lib dependency contains changes for version 2.0.0 or more than this version.
+Likewise, as in the previous code, I will explain how the transformation works. Please read the comments.
+One important note. The following Typescript transformations idea is based on ts-morph library. Please, read ts-morph documentation.
+Let’s look at tools/changes-registry/index.ts (you can find the related source here)
+import { Project, SyntaxKind, Node } from 'ts-morph';
+
+// Recursively walks the AST under `node` and renames code fragments:
+// every descendant whose kind is listed in `nodeKinds` and whose
+// trimmed text equals `oldName` is replaced with `newName`.
+// NOTE(review): getFullText() includes leading trivia; trim() strips
+// whitespace, but a leading comment attached to a matching node would
+// defeat the comparison — presumably fine for identifiers, confirm.
+function walkAndChange(
+  node: Node,
+  nodeKinds: SyntaxKind[],
+  oldName: string,
+  newName: string
+) {
+  // criteria matching: replace the node and stop descending, since the
+  // replacement text has no children left to visit
+  if (
+    nodeKinds.includes(node.getKind()) &&
+    node.getFullText().trim() === oldName
+  ) {
+    node.replaceWithText(newName);
+    return;
+  }
+  // recursive call over every child of the current node
+  node.forEachChild((c) => walkAndChange(c, nodeKinds, oldName, newName));
+}
+
+// Function-transformer: the updates runner above calls this with a shared
+// ts-morph Project and the glob paths of the sources to rewrite.
+// It renames `deprecatedSum` to `sum` across all matching files.
+// NOTE(review): this only mutates the in-memory AST — the caller is
+// responsible for persisting the changes via project.save().
+export default function (project: Project, sources: string[]) {
+  project.addSourceFilesAtPaths(sources);
+  const files = project.getSourceFiles();
+
+  // iterate the related source files
+  for (const file of files) {
+    // change "deprecatedSum" to "sum" in
+    // imports (SyntaxKind.ImportSpecifier) and
+    // in the rest (SyntaxKind.Identifier) of the code
+    walkAndChange(
+      file,
+      [SyntaxKind.Identifier, SyntaxKind.ImportSpecifier],
+      'deprecatedSum',
+      'sum'
+    );
+  }
+}
+Of course, the example above provides a fundamental transformation, and this logic could be much more difficult in real life.
+It’s time to use the solution, and let’s repeat it from the beginning
+Change the version of try-lib from 1.0.1 to 2.0.0 and run npm i.
+Run the following command.
+npx nx workspace-generator code-transformer -- try-lib --dry-run
+Let’s run the app to make sure that it works well.
+npx nx run client:serve
+Because this article represents only an idea or a concept, I’d like to focus on points that would be nice to consider in the future.
+The first helpful thing is to store information about past updates, and we don’t need to repeat transformations that were passed before.
We need to deal with other languages, not only Typescript.
Finally, we need to have a more flexible set of the generator’s input parameters.
+From time to time, my colleagues and I encounter situations where we need to implement custom visual solutions during frontend projects. This includes various tasks, such as charts, diagrams, and interactive schemes. In one project, I only had to deal with the charts and was able to resolve the issue quickly and efficiently by using a free chart library. However, in the next project, I was given the choice of the approach and the library to use. After researching and seeking advice from authoritative sources, I decided that the D3 library was the best solution for three main reasons.
+Flexibility.
Despite many popular existing patterns, D3 allows us to provide any custom SVG-based graphic.
Popularity.
This library is one of the most commonly used. It has a big community and a lot of resources for learning.
Universality.
There are many existing patterns for different charts and visualizations based on data. Also, it supports various data formats like JSON and CSV.
Despite D3’s popularity, I noticed some difficulties during my research that prompted me to write this article. I want to help my colleagues navigate similar situations.
+Important! All the projects I mentioned earlier are React-based, so all the code examples I provide are also connected to React. I don’t want to focus on unrelated topics. My goal here is to provide minimalistic solutions, which is why I will use JavaScript instead of TypeScript.
+As mentioned before, I want to provide fast and easy-to-use solutions, even if they are small and not immediately noticeable. That’s why I have created a series of simple examples demonstrating how to create a simple React Ranking Bar component using D3.
+Now, let’s focus on a couple of the key points.
+We have the following kind of data—fruits as keys with corresponding values.
+const data = {
+ Apple: 100,
+ Apricot: 200,
+ Araza: 5,
+ Avocado: 1,
+ Banana: 150,
+ Bilberry: 700,
+ // ...
+ Feijoa: 11,
+ Fig: 0,
+};
+We are expecting a simple visualization with the following features:
1. All bars (fruits) are ordered starting from the biggest values to the smallest.
2. Each bar should contain the related fruit name if possible. If the fruit name width is larger than the bar width, then the name should be cropped and "…" added, or hidden entirely.
3. The component should be responsive. If the user changes the screen size, the component should be redrawn.
+I’d like to skip the project setup and focus directly on the code, since I will provide all the working examples below. In my first step, I will give an empty SVG-based component. +Our App component should look like this:
+import React from "react";
+import StackedRank from "./StackedRank";
+import "./style.css";
+
+export default function App() {
+ return (
+ <div id="root-container">
+ <StackedRank />
+ </div>
+ );
+}
+};
+Pay attention to the attribute id="root-container". This is a chart container that we will use inside the StackedRank
component.
Let’s look at StackedRank
component.
import React, { useEffect, useState, useRef } from "react";
+import * as d3 from "d3";
+
+// Step #1: an empty SVG-based chart component. It measures its parent
+// container and draws a grey placeholder rectangle.
+export default function StackedRank() {
+  const svgRef = useRef();
+  // Dimensions are undefined until the container has been measured,
+  // so the SVG is not rendered on the very first pass (see below).
+  const [width, setWidth] = useState();
+  const [height, setHeight] = useState();
+
+  // Measure the parent #root-container (falling back to 100px when the
+  // container cannot be found) and fix the height at 50px.
+  const recalculateDimension = () => {
+    const getMaxWidth = () =>
+      parseInt(
+        d3.select("#root-container")?.node()?.getBoundingClientRect()?.width ??
+          100,
+        10
+      );
+    setWidth(getMaxWidth());
+    setHeight(50);
+  };
+
+  // Draw a grey rectangle covering the whole SVG area.
+  const renderSvg = () => {
+    const svg = d3.select(svgRef.current);
+
+    svg
+      .append("rect")
+      .attr("x", 0)
+      .attr("width", width)
+      .attr("y", 0)
+      .attr("height", height)
+      .attr("fill", "grey");
+  };
+
+  // Measure once after mount.
+  useEffect(() => {
+    recalculateDimension();
+  }, []);
+
+  // Render only when both dimensions are known.
+  useEffect(() => {
+    if (width && height) {
+      renderSvg();
+    }
+  }, [width, height]);
+
+  // Don't show the SVG while its shapes are still undefined.
+  if (!width || !height) {
+    return <></>;
+  }
+
+  return <svg ref={svgRef} width={width} height={height} />;
+}
+You can find the full solution on StackBlitz.
+Let me explain some important points about the code above. First of all, we need to handle the component container and shapes. The chart width and height are undefined by default.
+const [width, setWidth] = useState();
+const [height, setHeight] = useState();
+This is why we need to set them with the following code:
+useEffect(() => {
+ recalculateDimension();
+}, []);
+const recalculateDimension = () => {
+ const getMaxWidth = () =>
+ parseInt(
+ d3.select("#root-container")?.node()?.getBoundingClientRect()?.width ??
+ 100,
+ 10
+ );
+ setWidth(getMaxWidth());
+ setHeight(50);
+};
+In the code above, we calculate the component width that fits the available screen width using the parent container root-container. The height should be fixed (50px). +Also, pay extra attention to the following code in particular:
+if (!width || !height) {
+ return <></>;
+}
+
+return <svg ref={svgRef} width={width} height={height} />;
+First of all, we display our graphical content in SVG format. Secondly, we shouldn’t show it if its shapes are undefined.
+useEffect(() => {
+ if (width && height) {
+ renderSvg();
+ }
+}, [width, height]);
+Let’s deal with the graphical content when the component shapes are defined. +The following code
+const renderSvg = () => {
+ const svg = d3.select(svgRef.current);
+
+ svg
+ .append("rect")
+ .attr("x", 0)
+ .attr("width", width)
+ .attr("y", 0)
+ .attr("height", height)
+ .attr("fill", "grey");
+};
+just draws a gray rectangle according to the component shapes. +That’s all for Step #1.
+The main goal of this step is to make StackedRank
component like a Stacked Rank chart, excuse my tautology. So, we need to draw the below
+instead of just a gray rectangle.
The related code changes are in Stackblitz +The first thing, we need to do is to define data in the App component and pass it to the chart component.
+const data = {
+ Apple: 100,
+ Apricot: 200,
+ Araza: 5,
+ Avocado: 1,
+ Banana: 150,
+ // ...
+ Durian: 20,
+ Elderberry: 35,
+ Feijoa: 11,
+ Fig: 0,
+};
+
+export default function App() {
+ return (
+ <div id="root-container">
+ <StackedRank data={data} />
+ </div>
+ );
+}
+Traditionally, I want to provide the full component code and comment on it after.
+import React, { useEffect, useState, useRef } from "react";
+import * as d3 from "d3";
+
// Transforms the { fruit: value } map into a render-ready array of
// { fruit, value, x, width } records, sorted by value descending.
// - entries with value <= 0 are dropped (a zero-width bar is invisible);
// - each bar's width is its share of the grand total, scaled to `width`;
// - each bar's x is the running sum of the preceding bar widths.
function getNormalizedData(data, width) {
  const tmpData = [];
  let total = 0;
  for (const key of Object.keys(data)) {
    if (data[key] > 0) {
      // Initialize x/width up front so every record has a stable shape
      // (the original mutated the objects into a new shape later).
      tmpData.push({ fruit: key, value: data[key], x: 0, width: 0 });
      total += data[key];
    }
  }
  // Biggest value first, as the ranking requires.
  tmpData.sort((a, b) => b.value - a.value);
  let x = 0;
  for (const record of tmpData) {
    const percent = (record.value / total) * 100;
    const barwidth = (width * percent) / 100;
    record.x = x;
    record.width = barwidth;
    x += barwidth;
  }
  return tmpData;
}
+
+// Step #2: renders the ranked, colored bars with fruit labels, based on
+// the normalized data produced by getNormalizedData above.
+export default function StackedRank({ data }) {
+  const svgRef = useRef();
+  const [normalizedData, setNormalizedData] = useState();
+  const [width, setWidth] = useState();
+  const [height, setHeight] = useState();
+
+  // Measure the parent #root-container; the height is fixed at 50px.
+  const recalculateDimension = () => {
+    const getMaxWidth = () =>
+      parseInt(
+        d3.select("#root-container")?.node()?.getBoundingClientRect()?.width ??
+          100,
+        10
+      );
+    setWidth(getMaxWidth());
+    setHeight(50);
+  };
+
+  const renderSvg = () => {
+    const svg = d3.select(svgRef.current);
+
+    // NOTE(review): normalizedData is an array, so Object.keys() yields
+    // the stringified indices "0", "1", … while the fill below calls
+    // color(i) with numbers; an ordinal scale extends its domain for
+    // unseen values, so colors still cycle — confirm this is intended.
+    const color = d3
+      .scaleOrdinal()
+      .domain(Object.keys(normalizedData))
+      .range(d3.schemeTableau10);
+
+    // One rect per record; width - 1 leaves a 1px gap between bars.
+    svg
+      .selectAll()
+      .data(normalizedData)
+      .enter()
+      .append("g")
+      .append("rect")
+      .attr("x", (d) => d.x)
+      .attr("width", (d) => d.width - 1)
+      .attr("y", 0)
+      .attr("height", 50)
+      .attr("fill", (_, i) => color(i));
+
+    // One label per record, placed just inside the bar's left edge.
+    svg
+      .selectAll("text")
+      .data(normalizedData)
+      .join("text")
+      .text((d) => d.fruit)
+      .attr("x", (d) => d.x + 5)
+      .attr("y", (d) => 30)
+      .attr("width", (d) => d.width - 1)
+      .attr("fill", "white");
+  };
+
+  // Measure once after mount.
+  useEffect(() => {
+    recalculateDimension();
+  }, []);
+
+  // Render only once the data has been normalized.
+  useEffect(() => {
+    if (normalizedData) {
+      renderSvg();
+    }
+  }, [normalizedData]);
+
+  // Normalize as soon as both the data and the dimensions are known.
+  useEffect(() => {
+    if (width && height && data) {
+      setNormalizedData(getNormalizedData(data, width));
+    }
+  }, [data, width, height]);
+
+  if (!width || !height || !normalizedData) {
+    return <></>;
+  }
+
+  return <svg ref={svgRef} width={width} height={height} />;
+}
+The most tedious and time-consuming part of this step is the data transformation, which is contained in the 'getNormalizedData' function. I don’t want to explain it in detail. The main purposes of this function are:
+Provide a more convenient data representation - an array of objects instead of one object.
+Contain UI-consumed data: the X and width of the bar.
+Pay attention to the following lines:
+const percent = (record.value / total) * 100;
+const barwidth = (width * percent) / 100;
+The width of each bar should be calculated depending on the Fruit Total value and the component width. +Also, I recommend debugging or "console.log’ing" this code using my example: Stackblitz +The code of the component for Step #2 has a bit different initialization logic.
+useEffect(() => {
+ recalculateDimension();
+}, []);
+
+useEffect(() => {
+ if (normalizedData) {
+ renderSvg();
+ }
+}, [normalizedData]);
+
+useEffect(() => {
+ if (width && height && data) {
+ setNormalizedData(getNormalizedData(data, width));
+ }
+}, [data, width, height]);
+Let me translate the React code above into human-readable form. Firstly, we calculate the component dimensions. Once we have them, we normalize the data because we now have enough information. Finally, with the normalized data, we render our SVG using D3. And now, we are ready to focus on rendering. +As you can see below, our rendering consists of four parts. Please read my comments in the code. Don’t worry if you are not very familiar with D3 specifically. While the aim of this article is not to teach D3, I would like to provide you with some important D3-specific implementations.
+const renderSvg = () => {
+ // "Associate" `svg` varable with svgRef:
+ // return <svg ref={svgRef} width={width} height={height} />;
+ const svg = d3.select(svgRef.current);
+
+ // Get the list of colors using D3-way
+ const color = d3
+ .scaleOrdinal()
+ // Apple, Apricot, Araza, Avocado, etc
+ .domain(Object.keys(normalizedData))
+ .range(d3.schemeTableau10);
+
+ // Draw all expected bars according to `normalizedData`
+ svg
+ .selectAll()
+ // connect our data here
+ .data(normalizedData)
+ .enter()
+ // now we are ready for drawing
+ .append("g")
+ // draw the rect
+ .append("rect")
+ // `d` variable represents an item of normalizedData
+ // that we connected before
+ // please, also look at `getNormalizedData`
+ // we need to take x and width from there
+ .attr("x", (d) => d.x)
+ .attr("width", (d) => d.width - 1)
+ .attr("y", 0)
+ .attr("height", 50)
+ // Color for the bar depends only on its order `i`
+ .attr("fill", (_, i) => color(i));
+
+ // Put texts over all related bars according to `normalizedData`
+ svg
+ // we need to work with text
+ .selectAll("text")
+ .data(normalizedData)
+ // we need to work with text
+ .join("text")
+ // because `d` variable represents an item of normalizedData
+ // we can take the related fruit name from there
+ .text((d) => d.fruit)
+ // set x, y, and color
+ .attr("x", (d) => d.x + 5)
+ .attr("y", (d) => 30)
+ .attr("fill", "white");
+ // also, you can set more attributes like Font Family, etc...
+};
+If the comments above are not enough for a complete understanding of the topic, I highly recommend reading additional D3 resources. Additionally, I think live examples from Stackblitz, CodePen, etc., would help understand D3 principles. +And now, after a lengthy explanation, let’s take a look at how the example works.
+It looks predictable but a bit ugly. We need to deal with the overlapping text. Also, this component should be responsive. If the user changes the screen size, the component should be redrawn.
+As always, I want to provide the complete code first. Stackblitz
+import React, { useEffect, useState, useRef } from 'react';
+import * as d3 from 'd3';
+import { dotme, useWindowSize } from './utils';
+
+function getNormalizedData(data, width) {
+ // let's skip it because
+ // this implementation hasn't changed comparing
+ // with the previous implementation
+}
+
+export default function StackedRank({ data }) {
+ const svgRef = useRef();
+ const [fullWidth, fullHeight] = useWindowSize();
+ const [normalizedData, setNormalizedData] = useState();
+ const [width, setWidth] = useState();
+ const [height, setHeight] = useState();
+
+ const recalculateDimension = () => {
+ // let's skip it because
+ // this implementation hasn't changed comparing
+ // with the previous implementation
+ };
+
+ const renderSvg = () => {
+ const svg = d3.select(svgRef.current);
+
+ svg.selectAll('*').remove();
+
+ const color = d3
+ .scaleOrdinal()
+ .domain(Object.keys(normalizedData))
+ .range(d3.schemeTableau10);
+
+ svg
+ .selectAll()
+ .data(normalizedData)
+ .enter()
+ .append('g')
+ .append('rect')
+ .attr('x', (d) => d.x)
+ .attr('width', (d) => d.width - 1)
+ .attr('y', 0)
+ .attr('height', 50)
+ .attr('fill', (_, i) => color(i));
+
+ svg
+ .selectAll('text')
+ .data(normalizedData)
+ .join('text')
+ .text((d) => d.fruit)
+ .attr('x', (d) => d.x + 5)
+ .attr('y', (d) => 30)
+ .attr('width', (d) => d.width - 1)
+ .attr('fill', 'white');
+
+ svg.selectAll('text').call(dotme);
+ };
+
+ useEffect(() => {
+ if (normalizedData) {
+ renderSvg();
+ }
+ }, [normalizedData]);
+
+ useEffect(() => {
+ if (width && height) {
+ setNormalizedData(getNormalizedData(data, width));
+ }
+ }, [width, height]);
+
+ useEffect(() => {
+ if (data) {
+ recalculateDimension();
+ }
+ }, [data, fullWidth, fullHeight]);
+
+ if (!width || !height || !normalizedData) {
+ return <></>;
+ }
+
+ return <svg ref={svgRef} width={width} height={height} />;
+}
+Despite the fixed component height (50px), we need to recalculate its width according to the available screen width for each window resize. That’s why I added a new hook. The hook is useWindowSize. You can find the related source here Stackblitz
+Let me highlight the essential points regarding responsiveness.
+const [fullWidth, fullHeight] = useWindowSize();
+Get available screen dimensions fullWidth, fullHeight.
+ useEffect(() => {
+ if (data) {
+ recalculateDimension();
+ }
+ }, [data, fullWidth, fullHeight]);
+Recalculate component size if the screen has changed.
+Before we discuss smart texts, I recommend taking a look at the following solution. This is important because I used the dotme code as a prototype. The issue with the original dotme is that it limits a string by word criteria (see the original solution). However, in this example, the fruit names should be limited by character criteria. Let me explain my version of dotme.
+// Truncates each selected <text> element by *character* (unlike the
+// word-based original this was adapted from): characters are popped off
+// until the string plus "..." fits into the element's `width` attribute,
+// or the text is hidden entirely when even one character doesn't fit.
+export function dotme(texts) {
+  texts.each(function () {
+    const text = d3.select(this);
+    // get an array of characters
+    const chars = text.text().split('');
+
+    // make a temporary minimal text containing one character (a space) plus "..."
+    let ellipsis = text.text(' ').append('tspan').text('...');
+    // calculate the temporary minimal text width
+    const minLimitedTextWidth = ellipsis.node().getComputedTextLength();
+    // make the "ellipsis" text object
+    ellipsis = text.text('').append('tspan').text('...');
+
+    // calculate the available text width: text + ellipsis
+    // one important note here: text.attr('width') is taken from the
+    // following code fragment of the StackedRank component:
+    /*
+      svg
+        .selectAll('text')
+        .data(normalizedData)
+        // ...
+        .attr('width', (d) => d.width - 1)
+    */
+    // that's why we must define the width attribute for the text if we want
+    // this functionality to behave correctly
+    const width =
+      parseFloat(text.attr('width')) - ellipsis.node().getComputedTextLength();
+    // total number of characters
+    const numChars = chars.length;
+    // make the unlimited version of the string
+    const tspan = text.insert('tspan', ':first-child').text(chars.join(''));
+
+    // the following case covers the situation
+    // when we shouldn't display the string at all, even with the ellipsis
+    if (width <= minLimitedTextWidth) {
+      tspan.text('');
+      ellipsis.remove();
+      return;
+    }
+
+    // make the limited string: pop characters until the text fits
+    while (tspan.node().getComputedTextLength() > width && chars.length) {
+      chars.pop();
+      tspan.text(chars.join(''));
+    }
+
+    // if all characters are displayed we don't need to display the ellipsis
+    if (chars.length === numChars) {
+      ellipsis.remove();
+    }
+  });
+}
+I hope that’s it for dotme ;)
+You can use the function above quite simply. You just need to call the following:
+svg.selectAll('text').call(dotme);
+Despite repeating this point, I need to highlight it again due to its importance. We must define the width attribute for the text.
+ svg
+ .selectAll('text')
+ .data(normalizedData)
+ .join('text')
+ // ...
+ .attr('width', (d) => d.width - 1)
+ // ...
+Otherwise dotme gives wrong behavior. See the following code:
+ const width =
+ parseFloat(text.attr('width')) - ellipsis.node().getComputedTextLength();
+Now it’s time to run the app. But before I want to highlight one crucial point regarding D3 usage. Let’s look at the following line of code:
+svg.selectAll('*').remove();
+The code above clears all graphical stuff on the SVG. We should do it because we need to redraw the component, which means that the previous SVG objects need to be rejected. You can remove this line, rerun the app and change the window size. I recommend trying it if you want to feel how D3 works. +Here is a video of the final solution in action!
+Founded in 2013, Valor Software is a software development and consulting company specializing in helping businesses modernize their web platforms and best leverage technology. +By working with Valor Software, businesses can take advantage of the latest technologies and techniques to build modern web applications that are more adaptable to changing needs and demands while ensuring best practices through unparalleled OSS access via our team and community partners. +Reach out today if you have any questions sales@valor-software.com
+Web application development has undergone significant changes over the years, as developers and businesses seek to create more scalable, maintainable and user-friendly applications. In this short snippet, we will explore the evolution of web application development, from the traditional way of building monolithic applications to the more modern trends of using micro-frontends and Module Federation.
+Monolithic web applications are those where everything is part of the same front-end application and the request goes to only one huge back-end. This is the simplest and easiest way to build a web application, but it has many drawbacks. For example, a monolithic web application is hard to scale, maintain, and update. It also creates a tight coupling between the front-end and the back-end, which limits the flexibility and reusability of the code. A monolithic web application is like a single executable file or directory that makes deployment easier, but also makes changes more difficult.
+Direct access with one front-end to multiple microservices is an improvement over the monolithic approach. In this case, everything is still part of the same front-end application, but the requests are made to different microservices. Microservices are small, independent, and loosely coupled units of functionality that communicate with each other through APIs. This way, a web application can achieve better scalability, modularity, and performance. However, this approach still has some challenges. For instance, it can increase the complexity and overhead of managing multiple microservices. It also does not solve the problem of code duplication and inconsistency across the front-end.
+Micro-frontend and module federation are the future of web application development aimed at large companies, applications, and teams. In this approach, multiple containers are being consumed in one shell app. Each container represents a micro-frontend that encapsulates a specific feature or domain of the web application. The shell app acts as a host that dynamically loads and renders the micro-frontends based on the user’s needs and preferences. Module federation is a technique that enables the shell app and the micro-frontends to share code and dependencies without bundling them together. This way, each micro-frontend can be developed, deployed, and updated independently of each other and from the shell app.
+Micro-frontend and module federation offer many benefits for web application development. They enable faster delivery, better quality, and more innovation of web features. They also promote team autonomy, code reusability, and consistency across the web application. They also reduce the risk of breaking changes and conflicts among different parts of the web application.
+Web application development has evolved from monolithic to micro-frontend and module federation approaches to deal with the increasing complexity and variety of web applications. These approaches provide more flexibility, scalability, and efficiency for web developers and users alike.
+Technical interviewing in the lack of time always looks challenging because, as an interviewer, I mustn’t have room for mistakes. Probably not worth mentioning that a "question from a book" is often a false friend. Instead of the original answer, my interviewee could reproduce it mechanically, i.e., read from Wikipedia in the worst case or read before the interview without deep knowledge of the best one. But I’m not a judge. I’m just a searcher for my future colleague to join my team! I’m always trying to deal with a dilemma. The dilemma is that, on the one hand, the technical question should be less well-known because, in the case of a positive answer to the famous question, I can’t correctly estimate this answer due to the above. Let’s illustrate this case. Imagine you start asking about OOP. +You asked, Please, tell me about the OOP principles? The candidate answered perfectly. What is your conclusion regarding the candidate’s knowledge? Are they firm or not? Does the candidate know the topic, or did he/she just read this answer on Wikipedia? Should I proceed with another similar question on this topic? The issue is that you don’t have enough time for that. Also, you shouldn’t spam the candidate with many such questions or waste time on irrelevant questions. How to fix this situation?
+I see only one promising approach. You need to ask indirect, preferably practical, questions instead. In this case, for example, you can ask the following. Is it possible to instantiate an abstract class? And after that, ask some other additional questions. I suppose after gathering the answers, you understand the candidate’s knowledge.
+On the other hand, your "tricky" question should not be so tricky. It’s crucial because, imagine, the candidate didn’t answer. I want to ask the almost same as above. What is your conclusion regarding the candidate’s knowledge? Are they firm or not? Does the candidate know the topic, or was he/she confused due to lack of time and the complexity of the question? Is the question too contextual?
+A good interview question is always about the balance between simplicity and complexity, straightforward and contextuality. A good interview looks like art. Please, remember that the form of the question and the estimated answer could be variated due to the expected candidate level. Please, attend to every candidate more comprehensively and evaluate their skills more personally because he/she is possibly your colleague in the nearest future.
+I want to switch to the most challenging part of this article and illustrate a more complicated case. Imagine you are an interviewer for a NodeJS developer position and need to cover some critical knowledge points.
+Let’s get started. As a technical interviewer, I prefer to start from the authentic code fragment. Here is the following one.
+// Interview exhibit. A Promise executor runs synchronously, so p1's
+// busy-loop blocks the single-threaded event loop; per the output below,
+// p2 and p3 don't even start until p1 has resolved.
+const p1 = new Promise(resolve => {
+  console.log('start first promise', new Date());
+  console.time('first');
+  for (let i = 0; i < 9999999999; i++) {
+  }
+  resolve(1);
+  console.timeEnd('first');
+  console.log('finish first promise', new Date());
+});
+// p2 and p3 resolve from timers, so once started they wait concurrently
+// without blocking each other.
+const p2 = new Promise(resolve => {
+  console.log('start second promise', new Date());
+  console.time('second');
+  setTimeout(() => {
+    resolve(2);
+    console.timeEnd('second');
+    console.log('finish second promise', new Date());
+  }, 2000)
+});
+const p3 = new Promise(resolve => {
+  console.log('start third promise', new Date());
+  console.time('third');
+  setTimeout(() => {
+    resolve(3);
+    console.timeEnd('third');
+    console.log('finish third promise', new Date());
+  }, 10000);
+});
+
+// Promise.all fulfills once every promise has resolved: [1, 2, 3].
+(async () => {
+  const r = await Promise.all([p1, p2, p3]);
+  console.log(r);
+})();
+And here is the result.
+start first promise 2023-08-17T11:50:10.064Z
+first: 9.444s
+finish first promise 2023-08-17T11:50:19.513Z
+start second promise 2023-08-17T11:50:19.513Z
+start third promise 2023-08-17T11:50:19.514Z
+second: 2.002s
+finish second promise 2023-08-17T11:50:21.516Z
+third: 10.001s
+finish third promise 2023-08-17T11:50:29.515Z
+I’m not fond of the "interviewer-interviewee" style during our conversation. As I meant before, I’m searching for my future colleague. That’s why my first question will be the following. What do you think about the code above? I’m asking this way because I need to know how the interlocutor thinks and what parts of the topic are important to him/her, and depending on the first clues, I’ll build my future interview tactics.
+And now, it’s time for the interview’s most creative, exciting, and unpredictable part. Let me provide you with some possible scenarios as dialogs.
+Please, read my thoughts under the dialog details carefully. I provided them because I want to reveal the holy of holies of the dialog - my own thoughts! I hope it helps you understand my thinking and related future steps. It’s important because there is no "silver bullet" or generic pattern regarding the interview. That’s why it’s critically important to understand how I think here and adapt it to your practice.
+Interviewer: What do you think about the code above?
+Candidate: Promise.all() static method takes an iterable of promises as input and returns a single Promise. This returned promise fulfills when all of the input’s promises fulfill (including when an empty iterable is passed), with an array of the fulfillment values.
+Oops N1! Looks like a Wikipedia-based answer.
+Interviewer: Do you have nothing more to add?
+In this case, I prefer to ask brief, direct questions like the above.
+Candidate: No
+Oops N2! Really? Are you kidding me? (Remember, it’s just a thought in my mind. Please don’t say this in the interview!) There are two explanations for my interviewee’s behavior. The first one is a lack of knowledge, and the second one is that we are not on the same page, or he/she is hesitating. Let’s understand the reason.
+Interviewer: Could you explain why the result looks differently from, say, the following?
+start first promise ...
+start second promise ...
+start third promise ...
+finish second promise ...
+finish first promise ...
+finish third promise ...
+In other words, I wondered whether my interlocutor could explain why the first promise works synchronously. This proposal above is quite provocative because it contains 100% incorrect flow. This question is the best indicator for understanding the candidate’s level. In other words, this is the catharsis of the interview. BTW, I recommend providing the candidate with some error-based code or results. This way is perfect from a technical and psychological point of view at the same time. The best way to unblock the candidate’s stuck brain is to discuss incorrect flow. Moreover, after the candidate successfully resolved the issue, he/she became more confident.
+Candidate: First, Promises are not regarding multithreading. Second, if we face long-executing synchronous code, then this code will block the whole solution. In this example, the first promise starts and blocks others because the following code
+console.log('start first promise', new Date());
+ console.time('first');
+ for (let i = 0; i < 9999999999; i++) {
+ }
+ resolve(1);
+ console.timeEnd('first');
+ console.log('finish first promise', new Date());
+executes by the V8 engine consistently in the time, and no other code in the app can interrupt it internally. Moreover, I want to explain why the last couple of promises run "almost simultaneously." They start after the first one has been resolved. The difference is that a couple last have asynchronous code based on timers. That’s why no one from them blocks each other. Also, if we talk about I/O operations like fs.readFile then these operations will be executed in separate threads according to NodeJS architecture. That’s why if we have an array of promises only with similar (execution time) asynchronous I/O operations inside, then Promise.all details will be executed almost simultaneously.
+Cool! The candidate’s mind has been unblocked! It’s a pleasure to me cause this kind of situation. The answer above is not academic and is a bit compacted, but it shows me the candidate’s level. Additionally, I’d like to ask some questions regarding the history of NodeJS, Event Loop, and LibUV. But it’s optional stuff. Generally, I’m satisfied with the answers.
+I asked the additional question because the start of the answer looks like a definition from the well-known resource, and as an interviewer, I need to be sure that the candidate’s words come from himself. It’s important!
+Interviewer: What do you think about the code above?
+Candidate: Well, we shouldn’t put such a heavyweight code into a promise like in Promise 1.
+I’m skeptical about this way of answering because a bit different question was responded to. I didn’t ask, "What should we do?" or "How can we fix it?" Even though the answer looks wise, I need to clarify the topic.
+If you get an inappropriate answer such as the one above, please, don’t repeat the question or don’t tell the candidate that he/she has answered another question. It doesn’t work. Believe me! In my mind, it’s a bit impolite, and this way can ruin the relationship between your interlocutor and you. It’s important even if you have already decided regarding the candidate. Please, be intelligent and polite!
+Interviewer: But what happens if we put the "lightweight" code?
+Attention! This is a psychological trick. I intentionally ignored the "heavyweight" definition and used the antonym blindly. It’s not important now. I need to 100% understand the candidate’s way of thinking.
+Candidate: I guess the promises will run simultaneously in the case of Promise.all.
+Oops N1! I just caused him/her to give me the "real" answer and proceed with this topic.
+Interviewer: Could you explain why?
+Let’s finish this topic. This point is the catharsis of the interview.
+Candidate: In my last project, I implemented a similar code like the following, and all promises worked simultaneously.
+const p1 = new Promise((resolve, reject) => {
+ fs.readFile('./test-1.txt', 'utf8', (err, data) => {
+ if (err) {
+ reject(err);
+ return;
+ }
+ resolve(data)
+ });
+});
+const p2 = new Promise((resolve, reject) => {
+ fs.readFile('./test-2.txt', 'utf8', (err, data) => {
+ if (err) {
+ reject(err);
+ return;
+ }
+ resolve(data)
+ });
+});
+const p3 = new Promise((resolve, reject) => {
+ fs.readFile('./test-3.txt', 'utf8', (err, data) => {
+ if (err) {
+ reject(err);
+ return;
+ }
+ resolve(data)
+ });
+});
+
+(async () => {
+ const r = await Promise.all([p1, p2, p3]);
+ console.log(r);
+})();
+Let me explain what happens here. The candidate provided a 100% valid code with the correct result. Despite the similarity between this and the original example, the current example has another context. This kind of answer is a good indicator of the issues in the candidate’s knowledge. He/she provided this example mechanically due to lacking fundamental skills.
+Interviewer: Could you explain why they work "simultaneously"?
+Oops N2! I’m generally disappointed about my candidate!
+Candidate: Oh, I didn’t have a chance to dig deeply. I’m not a theoretical guy. It’s a feature of Promise.all, I guess.
+Oops N3! I’m always disappointed about this kind of candidate because they spent some time for practice and have been working on real projects for some years. Although, my candidate hasn’t become a proper software developer. I’m always so sorry in this kind of situation.
+Interviewer: Do you know the difference between fs.readFile and other code, for example, the plain loop (as in my original example), in terms of the code execution?
+The last chance! I must be 100% sure the candidate is unfamiliar with the topic.
+Candidate: Unfortunately, no:(
+I suppose now you understand how essential a dialog is and how important it is to finish the answer logically. In this case, the candidate started the answer more optimistically than in the previous scenario. But in fact, he doesn’t qualify as a proper developer. The lack of knowledge here is critical, and I’m not satisfied with the answers.
+BTW, this kind of candidate is the most "dangerous" because, at first glance, he/she has substantial knowledge, especially practical skills. But gaps in the fundamental skills could cause severe troubles in the company, which will engage the candidate. This is why the interviewing experience in the company is so critical.
+At this point, I want to proceed with another example of another technology from a different point of view. I want to share the Golang illustration that allows us to understand how important it is to account for technology-specific details during mandatory question preparation.
+Let’s consider Golang. As a technical interviewer, I want to start with only one eloquent question covering many sensitive points. It will help me to ask my future questions more effectively. One of the most critical topics in Golang is goroutines and channels. Formally, there are a lot of points on this topic in literature and guides. The team will waste a lot of time in case of brute-force asking. Moreover, in this case, as a technical interviewer, you can estimate only book-based knowledge, not skills! And what about the candidate’s nerves?
+Let’s choose another way. There is the following minimalistic fragment of code.
+package main
+
+import "fmt"
+
+func main() {
+ messages := make(chan string)
+
+ messages <- "message 1"
+ messages <- "message 2"
+
+ fmt.Println(<-messages)
+}
+Interviewer: How could you fix the code above?
+Both of us, Gophers, understand that the code above contains a very silly mistake;)
+Candidate: There is a deadlock here because, first, we are working on the same thread, and second, the channel above is unbuffered. Let me provide you with the ways to fix it.
+The first approach is the simplest one. We need to make the channel buffered.
+package main
+
+import "fmt"
+
+func main() {
+ messages := make(chan string, 2)
+
+ messages <- "message 1"
+ messages <- "message 2"
+
+ fmt.Println(<-messages)
+}
+Honestly, the original code above is unusual because channel approach usage is a good way to communicate between goroutines. Let me provide a proper way to use them.
+package main
+
+import (
+ "fmt"
+)
+
+func main() {
+ c := generator()
+ receiver(c)
+}
+
+func receiver(c <-chan int) {
+ for v := range c {
+ fmt.Println(v)
+ }
+}
+
+func generator() <-chan int {
+ c := make(chan int)
+
+ go func() {
+ for i := 0; i < 10; i++ {
+ c <- i
+ }
+ close(c)
+ }()
+
+ return c
+}
+Interviewer: Brilliant! Well done.
+This question saved the team’s time because we partially covered two topics from Golang and got a positive result about the candidate’s practical skills.
+Do you feel the difference between the question above and "What are the pros and cons of Golang?"
+Of course, I provided only a tiny slice of the imaginable interview as an illustration. There are more questions, interactions, and situations in real life. I offered just some of typical. But in any case, your interview should follow the principles below. They could be interesting outside of technical society only. That’s why I recommend reading the following to different roles including recruiters, HRs, PMs, etc. But stop, no principles! I’ll have the courage and say more…
+Prepare for the interview, including plan, code, and questions according to the candidate’s CV, position, and specific technology. Please, read the CV before. Imagine your candidate according to the CV and check if your imagination matches your feelings after the interview. 100% of the material should be at hand. It helps you decide what questions need to be taken from the pool. Any preparation during the interview is entirely unacceptable because time is valuable!
+Keep the dialog and collaboration. Be on the same page with the candidate. Be sophisticated.
+In the case of an unreasonably long answer, don’t hesitate to interrupt the candidate politely. Remember, your time and your colleagues’ time is valuable!
+Follow the plan. Some questions should be mandatory. Your plan is a kind of pilot’s checklist. Any diversion could cause a crash in the future.
+Remember that practice is the criterion of the truth. Discuss actual code as often as possible.
+Creativity and variability. Don’t repeat the same questions. Don’t discuss the same subject too much. Other topics are valuable too. Change the plan (except for mandatories) on the fly if necessary; your deep knowledge will help you. Remember, you are an expert. Be flexible!
+Keep a pure mind and be pragmatic. Your emotions are your enemy. Objectivity is your friend, although you are not a judge. Even a person’s poor appearance at first glance tells you nothing regarding his/her IQ/EQ.
+Communicate productively with other team members like developers, recruiters, etc. Don’t try taking advantage. Give time to your colleagues. Analyze answers to their questions.
+Keep all essential points during the interview, even soft skill-related. Don’t miss this information during your result-making because you don’t know which point will be decisive when the CEO or someone else from superiors makes the final decision. Your goal is to provide as objective as possible information regarding the candidate.
+Try to ask about the candidate’s fate. Does the company offer him/her? Decline? Why? Does he/she approve the offer? First, because you are a part of the company. Second, this practice will broaden your horizons.
+Offtopic! If you want to get pleasure from coding, please read my previous Game-n-Qwik series.
+At the NextJS conference there were a lot of really exciting things that were announced, while a lot of the new functionality centered around Vercel-centric offerings which will greatly improve the developer experience and performance there was one new project that was talked about a good amount and that was Turbopack. Created by Tobias and the team at Vercel, Turbopack was advertised as the “next evolution of Webpack” and the “Twilight of Webpack”, the conversations in Discord during the presentation bounced between “Webpack Killer” and the more cautionary “This is still alpha should we use it”, which seems about on par with what to expect from developers seeing a new shiny tool.
+This post is not intended to detract from anything the Vercel / Next team is doing with Turbopack, nor detract from the Next.js conference itself. A conference that had one of the best overall event production and fluidity I’ve seen outside of Apple events.
+During her conference presentation on stage J, Maia Tegarden shared that Turbo is 700x faster than Webpack and 10x faster than Vite, the main differences being the advanced caching mechanisms and the build platform moving to Rust. There was one large comparison that I believe the presentation left out, most likely intentionally, comparing Turbopack to Webpack with Nx. Nx has its own caching mechanisms both locally and in the cloud (which organizationally would be more beneficial for enterprises with heavily distributed teams).
+In order to do some testing and benchmarking, I decided to follow the guide on starting a project with turbo. Luckily the Vercel team makes it super easy to get started.
+npx create-next-app --example with-turbopack
+This single command scaffolded the demo application easily and running npm run build the initial compilation is 2.19ms which is surprisingly fast. Opening the URL that was output by the command, the first-page load event took 1.57s and the CLI itself mentioned that the output was updated in 960ms. Refreshing the page multiple times took between 311ms and 374ms to load and triggered two updated events averaging between 107ms and 154ms.
+
+
In general a very fast experience.
+
+
Updating a CSS class on the layout.tsx to change the y spacing ( a very simple action ), triggered two update events that took roughly 200ms each. A page refresh took 583ms on the browser and triggered two events taking 390ms and 784ms respectively. Oddly sometimes refreshing the page would take several seconds on the event timings with some instances taking 800ms to 3400ms and up.
+
+
Now for Nx we have a very similar command that we can run:
+npx create-nx-workspace@latest
+This single command kicks off a CLI wizard where I selected Next and CSS as the options. The default project builds and serves extremely fast, but this project is not a fair comparison because the example from Vercel uses Tailwind and more complex component architecture, as well as Next 13. After copying in all of the application code from the Next example into the Nx workspace, I also had to configure the application to use PostCSS and ran the same page refreshes and changes. The example repo is found here. Because the official next template has not yet been updated to Next 13 (which was just released) you will have to do a force install.
+
+
Starting the application with npm run start took roughly 773ms, and opening the application with the link triggered 3 events, each of those events took between 254ms to 914ms. The page load event took 1.16s. Refreshing the page multiple times took on average: 516ms and 612ms for the load event. No CLI events were triggered during the page refresh.
+
+The experience was feeling equally fast even though they are in reality different by a few hundred milliseconds.
+
+
Doing the updates of the exact same CSS classes triggered a single event that took between 153ms to 442ms. Reloading the page triggered an additional two events which took 95ms and 250ms respectively.
+
+
+Turbopack looks promising but still has some room for improvement from a performance standpoint. I’d love to see what is coming with the next version of Nx and the updates made to better support Next 13. However, outside of the initial startup, the actual development activity and JIT updates appear to be faster with Webpack and Nx than with Turbopack.
+Because Nx also can leverage multiple builders, including esbuild, I’d also be interested to see Nx working directly with Turbopack itself and how to replace the local cache being used by Turbopack with the cloud cache from Nx.
++ | Turbopack | +Nx + Webpack | +||
---|---|---|---|---|
+ | Min |
+Max |
+Min |
+Max |
+
Initial Startup (CLI) |
+~2ms |
++ | 773ms |
++ |
First Page Load (Browser) |
+~1.6s |
++ | 1.2s |
++ |
First Page Load Events (CLI) |
+~960ms |
++ | ~914ms |
++ |
Page Refresh (Browser) |
+~300ms |
++ | 516ms |
+612ms |
+
Page Refresh (CLI) |
+~100ms |
++ | none* |
+none* |
+
HTML Class Updates (CLI) |
+~580ms |
+>1.5s |
+95ms |
+250ms |
+
HTML Class Updates (Browser) |
+> 400ms (two events ~200ms each) |
+> 3s (two events 800ms - 3.5s each) |
+150ms |
+442ms |
+
*no compile events were reported, however that does not mean none were potentially executed due to possibly being swallowed by Nx
+note: these tests were all performed on a 16” MBP with a M1 Max and 64 GB memory
Here at Valor Software we had the challenge of analyzing some metrics from developers’ productivity. So, we’ve started questioning ourselves: What are the core daily activities of a developer? On a macro level we could say that it is to deliver clean and reliable code on top of a consistent base, but with which commit frequency? And how to correlate code deployment with bugs and other stuff? Is this related to teams, to specific projects or even to the used technology stack?
+In Data Science, we usually start our investigations based on the scientific method (and CRISP-DM approach), to know better the target situation, its surroundings and mainly doing the questions that will drive us to catch the root cause of our problems.
+OK, so much fancy stuff. What is the relationship between all of this and the Valor Software Medusa project? We realized that GitHub is a good provider of data when it comes to developers productivity as we can track information about repositories, commits, tests and much more, this drove us to develop a Data Pipeline to extract and process data from it.
+We have designed a business framework containing the daily core activities of a developer and have split it into four Pillars, when it comes to GitHub:
+Version Control Management
Compatibility
Infrastructure
Team
+The problem organization is a very important step in Data Science/Engineering projects as it gives us the direction of what data sources we should consume and what metrics we are willing to build, as it makes no sense to build a rocket dashboard, having metrics that are not related to the operation.
+The idea is based on hitting the GitHub API, collecting the necessary metrics from a range of endpoints, saving the result in an intermediary layer and then loading it into the database.
+Why do we use an intermediary layer rather than saving it directly to the DataBase?
We usually follow this approach in order to make the pipeline more resilient. Imagine we spent hours iterating over API pagination, and then some error occurs. In some cases we can suffer data loss and have to restart it all again. Saving the data in an intermediary layer such as AWS S3 or Google Storage makes the pipeline execute in steps, and also allows us to process the data later, use it in Data Science experiments and so on.
+The application design is based on OOP and contains the following mechanisms:
+App Deployment
Pipeline orchestration
Storage layer
Visualization layer
Infrastructure management
Data pipeline source code
The application deployment is done using Docker and the containers needed to run Airflow with its services are all described in a docker-compose file
+The triggering of the data ingestion and processing jobs can be done through the Airflow UI, which uses DAGs to manage all the working code (DAG stands for Directed Acyclic Graph; DAGs are responsible for managing the tasks of the data pipeline)
+There are two of them in this project. One is the intermediary layer, that stores the raw data from the API calls, organizing it into year/month/day of the request. The other one is the Data warehouse, a database based on PostgreSQL to store the tables containing the processed information.
+The chosen app for visualizing data at our data warehouse is Apache Superset. Considering it is free, Superset is an incredible tool. From my experience it has most of the features we can find in the famous and paid Power BI. In addition, Superset is also ready for streaming needs and is cluster scalable.
+The infra is deployed at the Google Cloud, and the necessary resources are created and managed by Terraform
+The code design is based on two strong objects which are intended to interact with each other so as to ingest, process and write data from different data sources to different destinations, all based on json configuration files.
+- Hook:
Responsible for interface with external services, like the GitHub API, the Cloud storage (GCP) and the Data warehouse, holding its credentials and authentication methods.
- Operator:
Responsible for different methods operation (call) on top of the data and trigger functions like:
Download data
Filter data
Calculate metrics
Upload logs
also holding configurations, restrictions and other information about the data object that is being ingested.
+For the specific case of GitHub, the authentication is done using the account token. It can be a User or an Organization, resulting in a flexible object as Users and Organizations have different API calls to retrieve similar categories of data. Just like in the representation below:
+Once the data is requested from the API, based on the configuration file, it is stored in Google Cloud Storage. +Once the data is properly downloaded to this intermediary layer, the Operator calls the configuration file to filter the correct information from the raw data, opening a way to the next step: data transformation, and metrics calculation.
+The creation of data pipelines can become something really complex if we do not care about details like the creation of generic functions, the config files approach, as per reuse code and make the processing more flexible.
+So, at the end we have a data warehouse to consume data, making it available to the Medusa App and to the dashboard tool. This way, managers, product managers or PO’s can create their own views, test their hypothesis or even find answers with the power of the data.
+View of the data pipeline architecture:
+No matter what social media platform you are on if you follow the tech space, or any space for that matter, you are most likely seeing content about ChatGPT or even content generated by ChatGPT.
+When it first came out a few weeks ago, I played with it, thought it was pretty cool and had my kids start using it to write guided stories with ideas from their imagination. When combined with MidJourney AI generated art the overall experience was quite amazing.
+Last night seeing @scriptedalchemy share about his experiences with it I decided to try something a little more challenging.
+A lot of my focus lately is on ModuleFederation and webpack. I asked myself what could ChatGPT do with that combination and what would be fun to see. My prompts and directions will be in quote blocks.
+The first question I asked the site:
+++++Create a webpack plugin in typescript that wraps module federation and adds additional logging
+
Below is the first reply:
+++++To create a webpack plugin in TypeScript that wraps module federation and adds additional logging, you can follow these steps:"
+++Create a new TypeScript project and install the necessary dependencies:
++++++npm init -y +npm install --save-dev webpack webpack-cli typescript ts-loader
++Create a tsconfig.json file to configure the TypeScript compiler. In the compilerOptions object, set the module option to "commonjs" and the target option to "esnext". This will ensure that the generated JavaScript code is compatible with webpack.
++++++{ + "compilerOptions": { + "module": "commonjs", + "target": "esnext" + } +}
++Create a webpack.config.ts file to configure webpack. In the configuration object, set the mode option to "development" and add a resolve object to specify that webpack should resolve TypeScript files.
++++++import { Configuration } from 'webpack'; + +const config: Configuration = { + mode: 'development', + resolve: { + extensions: ['.ts', '.tsx'] + }, + // Add other configuration options here +}; + +export default config;
++In the webpack.config.ts file, add a module object to specify that webpack should use the ts-loader to transpile TypeScript files.
++++++import { Configuration } from 'webpack'; + +const config: Configuration = { + // Other configuration options + module: { + rules: [ + { + test: /\.tsx?$/, + use: 'ts-loader', + exclude: /node_modules/ + } + ] + } +}; + +export default config;
++Create a src directory and add an entry point file, such as src/index.ts. This will be the file that webpack will use as the starting point for the build.
+++In the webpack.config.ts file, add an entry object to specify the entry point file and a output object to specify the path and filename of the bundled JavaScript file.
++++++import { Configuration } from 'webpack'; + +const config: Configuration = { +// Other configuration options +entry: './src/index.ts', +output: { +path: __dirname + '/dist', +filename: 'bundle.js' +} +}; + +export default config;
++Install the necessary dependencies for module federation:
++++++npm install --save-dev @module-federation/client
++In the webpack.config.ts file, import the ModuleFederationPlugin from the @module-federation/client package and add it to the plugins array in the configuration object. You can also specify the remotes option to specify the remote modules that your application should be able to access.
++++++import { Configuration } from 'webpack'; +import { ModuleFederationPlugin } from '@module-federation/client'; + +const
You can see that the last code block is incomplete. Sometimes, on more lengthy requests, the chatbot can time out, I tried, quite literally, asking it to keep going.
+++++Keep going
+
"Sure, here are the next steps:", said ChatGPT
+++++Create a TypeScript interface for the plugin’s options. This will allow you to specify the options for the plugin when it is added to the plugins array in the webpack configuration.
++++++export interface ModuleFederationLoggingPluginOptions { + // Add options here +}
++Create a new class for the plugin that extends the ModuleFederationPlugin class and implements the apply method. In the apply method, you can wrap the ModuleFederationPlugin and add additional logging.
++++++import { Compiler, Plugin } from 'webpack'; +import { ModuleFederationPlugin } from '@module-federation/client'; +import { ModuleFederationLoggingPluginOptions } from './options'; + +class ModuleFederationLoggingPlugin implements Plugin { + private readonly options: ModuleFederationLoggingPluginOptions; + private readonly moduleFederationPlugin: ModuleFederationPlugin; + + constructor(options: ModuleFederationLoggingPluginOptions) { + this.options = options; + this.moduleFederationPlugin = new ModuleFederationPlugin(options); + } + + apply(compiler: Compiler) { + this.moduleFederationPlugin.apply(compiler); + + // Add additional logging here + } +}
++In the webpack.config.ts file, import the ModuleFederationLoggingPlugin class and add an instance of it to the plugins array in the configuration object. You can also specify the options for the plugin by passing an object to the constructor.
++++++import { Configuration } from 'webpack'; +import { ModuleFederationLoggingPlugin } from './module-federation-logging-plugin'; + +const config: Configuration = { + // Other configuration options + plugins: [ + new ModuleFederationLoggingPlugin({ + // Specify options here + }) + ] +}; + +export default config;
++Run webpack to build the project.
++++++npx webpack
++That’s it! You have now created a webpack plugin in TypeScript that wraps module federation and adds additional logging.
+
Seeing what the results were it was quite verbose, but it does not follow the platform I usually like to use, @nrwl_io’s Nx. So I did a redirect on the conversation.
+++++how can I simplify the steps by using Nx
+
Using Nx (Nrwl Extensions for Angular) can simplify the process of creating a webpack plugin in TypeScript that wraps module federation and adds additional logging. Nx is a set of open-source tools and libraries that can help you develop full-stack applications more efficiently.
+To use Nx to create a webpack plugin, follow these steps:
+Install the Nx CLI:
+npm install -g @nrwl/cli
+Create a new Nx workspace:
+npx create-nx-workspace my-workspace
+In the Nx workspace, create a new library for the webpack plugin:
+npx nx g @nx/workspace:lib module-federation-logging-plugin
+In the library directory, create a tsconfig.json file to configure the TypeScript compiler. In the compilerOptions object, set the module option to "commonjs" and the target option to "esnext". This will ensure that the generated JavaScript code is compatible with webpack.
+{
+ "compilerOptions": {
+ "module": "commonjs",
+ "target": "esnext"
+ }
+}
+In the library directory, create a new TypeScript file for the plugin, such as src/module-federation-logging-plugin.ts. In this file, create a new class for the plugin that extends the ModuleFederationPlugin class and implements the apply method. In the apply method, you can wrap the ModuleFederationPlugin and add additional logging.
+import { Compiler, Plugin } from 'webpack';
+import { ModuleFederationPlugin } from '@module-federation/client';
+
+export class ModuleFederationLoggingPlugin implements Plugin {
+ private readonly moduleFederationPlugin: ModuleFederationPlugin;
+
+ constructor(options: any) {
+ this.moduleFederationPlugin = new ModuleFederationPlugin(options);
+ }
+
+ apply(compiler: Compiler) {
+ this.moduleFederationPlugin.apply(compiler);
+
+ // Add additional logging here
+ }
+}
+In the libs/module-federation-logging-plugin/tsconfig.lib.json file, add a typeRoots field to specify the path to the TypeScript declaration files for the webpack and @module-federation/client packages. This will allow the TypeScript compiler to find the type definitions for these packages.
+{
+ "extends": "./tsconfig.json",
+ "compilerOptions": {
+ "outDir": "../../dist/out-tsc",
+ "typeRoots": [
+ "node_modules/@types",
+ "node_modules/@module-federation/client/node_modules/@types"
+ ]
+ },
+ "include": ["src/**/*.ts"]
+}
+In the libs/module-federation-logging-plugin/src/index.ts file, export the ModuleFederationLoggingPlugin class. This will allow other projects in the Nx workspace to use the
+You can see that I received another timeout due to the length of the response. This time instead of just asking it to continue I tried to get it to pick back up from step 7 which was incomplete.
+++++Repeat the content of step 7 and keep going
+
And the chat-bot said:
+++++Sure, here is the content of step 7:
+++In the libs/module-federation-logging-plugin/src/index.ts file, export the ModuleFederationLoggingPlugin class. This will allow other projects in the Nx workspace to use the plugin.
++++++export { ModuleFederationLoggingPlugin } from './module-federation-logging-plugin';
++Here are the next steps:
+++In the Nx workspace, create a new application that will use the webpack plugin:
++++++npx nx g @nx/angular:app my-app
++Install the necessary dependencies for module federation:
++++++npm install --save-dev @module-federation/client
++In the apps/my-app/src/webpack.config.ts file, import the ModuleFederationLoggingPlugin class from the library and add an instance of it to the plugins array in the webpack configuration object. You can also specify the options for the plugin by passing an object to the constructor.
++++++import { Configuration } from 'webpack'; +import { ModuleFederationLoggingPlugin } from '@my-workspace/module-federation-logging-plugin'; + +const config: Configuration = { + // Other configuration options + plugins: [ + new ModuleFederationLoggingPlugin({ + // Specify options here + }) + ] +}; + +export default config;
++Run webpack to build the application:
++++++npx nx run my-app:build
++That’s it! You have now used Nx to create a webpack plugin in TypeScript that wraps module federation and adds additional logging.
+
Just to see how far I can go I wanted to add some documentation. No plugin is complete without documentation right?
+++++Can you add swagger documentation to the src/module-federation-logging-plugin.ts file
+
And the chat-bot delivered the following:
+++++To add Swagger documentation to the src/module-federation-logging-plugin.ts file, you can use Swagger annotations in the form of JSDoc comments. JSDoc is a documentation standard for JavaScript that is widely supported by tools like Swagger.
+++Here is an example of how you can add Swagger documentation to the ModuleFederationLoggingPlugin class:
++++++import { Compiler, Plugin } from 'webpack'; +import { ModuleFederationPlugin } from '@module-federation/client'; + + +/** +* A webpack plugin that wraps module federation and adds additional logging. +* +* @param {Object} options - The options for the plugin. +* @param {string} options.remoteName - The name of the remote module. +* @param {string} options.remoteType - The type of the remote module. +* @param {string} options.remoteUrl - The URL of the remote module. +*/ +export class ModuleFederationLoggingPlugin implements Plugin { +private readonly moduleFederationPlugin: ModuleFederationPlugin; + + constructor(options: any) { + this.moduleFederationPlugin = new ModuleFederationPlugin(options); + } + + apply(compiler: Compiler) { + this.moduleFederationPlugin.apply(compiler); + + // Add additional logging here + } +}
++You can then use a tool like Swagger UI or ReDoc to generate a documentation website from the Swagger annotations. To do this, you will need to generate a Swagger JSON file from the annotations. There are various tools and libraries that can do this, such as swagger-jsdoc and swagger-ui-express.
+++Here is an example of how you can use swagger-ui-express to generate a Swagger documentation website:
+++Install the necessary dependencies:
++++++npm install --save swagger-ui-express swagger-jsdoc
++Create a swagger.js file with the following code:
++++++const swaggerUi = require('swagger-ui-express'); +const swaggerJSDoc = require('swagger-jsdoc'); + +// Swagger definition +const swaggerDefinition = { + openapi: '3.0.0', + info: { + title: 'Module Federation Logging Plugin', + version: '1.0.0', + description: 'A webpack plugin that wraps module federation and adds additional logging.' + }, + servers: [ + { + url: 'http://localhost:3000' + } + ] +}; + +// Options for the swagger-jsdoc library +const options = { + swaggerDefinition, + apis: ['./src/module-federation-logging-plugin.ts'] +}; + +// Initialize the Swagger document +const swaggerSpec = swaggerJSDoc(options); + +module.exports = app => { + app.use('/api-docs', swaggerUi.serve, swaggerUi.setup(swaggerSpec)); +};
++In your application’s main file, such as index.js, require the swagger.js file and pass the express app instance to it:
++++++const express = require('express'); +const app = express();
After about 5 minutes I have code that is pretty close to what I was expecting. It has more comments and guidance than I would have created on my own, and for the most part, follows some pretty decent best practices.
+ChatGPT was able to accept redirects that were pretty technical in nature, with little context, and adjust the output appropriately.
+While not a replacement for an experienced developer, this tooling and process can help you learn new frameworks and patterns and create functionality more rapidly.
+Combining highly skilled engineers with ChatGPT will in the near future (if not now) increase their velocity. Combining new engineers with ChatGPT will aid in their exposure to practices that they can then begin to understand and question in their learning journey.
+What exciting times we live in!
+It’s no secret that the rules on pure React solutions are pretty simple. We just need to properly use all React inventory like useState, useEffect, useMemo, and useCallback. There are many intelligent articles, guides, and examples on the topic. But let’s answer the following question.
+How many "pure" projects did you tackle?
+A pure React (Angular, NodeJS, etc.) project could look like nonsense in real life. Customers expect complicated solutions, including different third-party stuff like Payment Systems, Graphical Libraries, CRM integrations, Tracking Tools, etc. Obviously, not all of them are React-friendly, so we should account for these libraries' features in most cases and try to perfect the React code simultaneously.
+Today I want to tell you some performance specifics across React and D3.
+I wrote several articles on the D3 topics before, and I guess it will also be interesting for you. There are the following related articles.
+But let’s focus on the current topic.
+D3.js is a JavaScript library for manipulating documents based on data. D3 helps you bring data to life using HTML, SVG, and CSS. D3’s emphasis on web standards gives you the full capabilities of modern browsers without tying yourself to a proprietary framework, combining powerful visualization components and a data-driven approach to DOM manipulation.
+D3 is great! I’m fond of this beautiful library. But it lives its own life. That’s why we need to remember this fact when we work with D3 outside Vanilla JS, say, in React.
+The goal is a simple D3 line chart implemented with dynamic guideline movement every second.
+I’m pretty sure the best way to understand what’s good is to explain what’s wrong. That’s why I will start my solutions from the worst example. I’m also going to explain why the example is so wrong, and after that, I’ll propose to you the best way of implementation.
+There is the following component represents a line chart.
+import React, { useEffect, useRef } from "react";
+import * as d3 from "d3";
+
+const transform = "translate(50,50)";
+
+export default function LineChart({ data, width, height, marker }) {
+ const svgRef = useRef();
+
+ const renderSvg = () => {
+ const chartWidth = width - 200;
+ const chartHeight = height - 200;
+
+ const svg = d3.select(svgRef.current);
+
+ svg.selectAll("*").remove();
+
+ const xScale = d3.scaleLinear().domain([0, 100]).range([0, chartWidth]);
+ const yScale = d3.scaleLinear().domain([0, 200]).range([chartHeight, 0]);
+
+ const g = svg.append("g").attr("transform", transform);
+
+ g.append("g")
+ .attr("transform", "translate(0," + chartHeight + ")")
+ .call(d3.axisBottom(xScale));
+
+ g.append("g").call(d3.axisLeft(yScale));
+
+ svg
+ .append("g")
+ .selectAll("dot")
+ .data(data)
+ .enter()
+ .append("circle")
+ .attr("cx", function (d) {
+ return xScale(d[0]);
+ })
+ .attr("cy", function (d) {
+ return yScale(d[1]);
+ })
+ .attr("r", 3)
+ .attr("transform", transform)
+ .style("fill", "#CC0000");
+
+ const line = d3
+ .line()
+ .x(function (d) {
+ return xScale(d[0]);
+ })
+ .y(function (d) {
+ return yScale(d[1]);
+ })
+ .curve(d3.curveMonotoneX);
+
+ svg
+ .append("path")
+ .datum(data)
+ .attr("class", "line")
+ .attr("transform", transform)
+ .attr("d", line)
+ .style("fill", "none")
+ .style("stroke", "#CC0000")
+ .style("stroke-width", "2");
+
+ if (marker) {
+ svg
+ .append("svg:line")
+ .attr("transform", transform)
+ .attr("stroke", "#00ff00")
+ .attr("stroke-linejoin", "round")
+ .attr("stroke-linecap", "round")
+ .attr("stroke-width", 2)
+ .attr("x1", xScale(marker))
+ .attr("y1", 200)
+ .attr("x2", xScale(marker))
+ .attr("y2", 0);
+ }
+ };
+
+ useEffect(() => {
+ renderSvg();
+ }, [width, height, data, marker]);
+
+ if (!width || !height || !data) {
+ return <></>;
+ }
+
+ return <svg ref={svgRef} width={width} height={height} />;
+}
+This component takes the following props.
+data - chart data as a two-dimensional array of x and y
+width - with of the chart
+height - height of the chart
+marker - X axis of a guideline
+And there is a related parent component.
+import React, { useState, useEffect } from "react";
+import LineChart from "./LineChart";
+import "./style.css";
+
+const data = [
+ [1, 1],
+ [12, 20],
+ [24, 36],
+ [32, 50],
+ [40, 70],
+ [50, 100],
+ [55, 106],
+ [65, 123],
+ [73, 130],
+ [78, 134],
+ [83, 136],
+ [89, 138],
+ [100, 140],
+];
+
+export default function App() {
+ const [marker, setMarker] = useState(10);
+
+ useEffect(() => {
+ const intervalId = setInterval(() => {
+ setMarker((prevMarker) => (prevMarker + 10 > 100 ? 10 : prevMarker + 10));
+ }, 1000);
+
+ return () => {
+ clearInterval(intervalId);
+ };
+ }, []);
+
+ return (
+ <div id="root-container">
+ <LineChart data={data} width={500} height={400} marker={marker} />
+ </div>
+ );
+}
+There is an interval that refreshes the marker value every second and passes it as the chart’s prop.
+You can play with the complete example here.
+It seems that nothing foreshadows the issue yet. I want to modify the code above. The aim is to demonstrate the issue eloquently.
+First, I’ll add global renders and timeStart variables to public/index.html
+<script>
+ var renders = 0;
+ var timeStart = new Date().toISOString();
+</script>
+<div id="root"></div>
+Second, I increment renders on every LineChart render.
+export default function LineChart({ data, width, height, marker }) {
+ // no changes here ...
+
+ renders++;
+
+ useEffect(() => {
+ renderSvg();
+ }, [width, height, data, marker]);
+
+ if (!width || !height || !data) {
+ return <></>;
+ }
+
+ return <svg ref={svgRef} width={width} height={height} />;
+}
+And finally, I changed the parent component the following way.
+export default function App() {
+ const [marker, setMarker] = useState(10);
+
+ useEffect(() => {
+ const intervalId = setInterval(() => {
+ setMarker((prevMarker) => (prevMarker + 10 > 100 ? 10 : prevMarker + 10));
+ }, 1000);
+
+ return () => {
+ clearInterval(intervalId);
+ };
+ }, []);
+
+ const currentTime = new Date().toISOString();
+
+ return (
+ <div id="root-container">
+ <div style={{ marginTop: 20, marginLeft: 20 }}>
+ renders: {renders}
+ <br />
+ start: {timeStart}
+ <br />
+ now: {currentTime}
+ </div>
+ <LineChart data={data} width={500} height={400} marker={marker} />
+ </div>
+ );
+}
+The main goal is to display three metrics: the number of renders, the start time, and the current time. Let’s run the modified example.
+As far as we can see, each marker change causes the LineChart component to re-render. If the result above doesn’t persuade you, I have prepared the experiment below. I left the working example running for a few minutes and drank coffee.
+When I returned, I saw the following.
+948 renders per cup of coffee! Looks awful… Moreover, each render involves a bunch of heavyweight D3 operations!
+It’s time to fix the issue above.
+First, let me provide you the final LineChart version and explain what’s changed there step by step.
+import React, {
+ useEffect,
+ forwardRef,
+ useImperativeHandle,
+ useRef,
+} from "react";
+import * as d3 from "d3";
+
+const transform = "translate(50,50)";
+
+const LineChart = forwardRef(({ data, width, height }, ref) => {
+ const svgRef = useRef();
+ let svg;
+ let xScale;
+
+ useImperativeHandle(ref, () => ({
+ setMarker: (value) => {
+ if (isNaN(value)) {
+ return;
+ }
+ svg.selectAll(".marker").remove();
+
+ svg
+ .append("svg:line")
+ .attr("transform", transform)
+ .attr("class", "marker")
+ .attr("stroke", "#00ff00")
+ .attr("stroke-linejoin", "round")
+ .attr("stroke-linecap", "round")
+ .attr("stroke-width", 2)
+ .attr("x1", xScale(value))
+ .attr("y1", 200)
+ .attr("x2", xScale(value))
+ .attr("y2", 0);
+ },
+ }));
+
+ const renderSvg = () => {
+ const chartWidth = width - 200;
+ const chartHeight = height - 200;
+
+ svg = d3.select(svgRef.current);
+
+ svg.selectAll("*").remove();
+
+ xScale = d3.scaleLinear().domain([0, 100]).range([0, chartWidth]);
+ const yScale = d3.scaleLinear().domain([0, 200]).range([chartHeight, 0]);
+
+ const g = svg.append("g").attr("transform", transform);
+
+ g.append("g")
+ .attr("transform", "translate(0," + chartHeight + ")")
+ .call(d3.axisBottom(xScale));
+
+ g.append("g").call(d3.axisLeft(yScale));
+
+ svg
+ .append("g")
+ .selectAll("dot")
+ .data(data)
+ .enter()
+ .append("circle")
+ .attr("cx", function (d) {
+ return xScale(d[0]);
+ })
+ .attr("cy", function (d) {
+ return yScale(d[1]);
+ })
+ .attr("r", 3)
+ .attr("transform", transform)
+ .style("fill", "#CC0000");
+
+ const line = d3
+ .line()
+ .x(function (d) {
+ return xScale(d[0]);
+ })
+ .y(function (d) {
+ return yScale(d[1]);
+ })
+ .curve(d3.curveMonotoneX);
+
+ svg
+ .append("path")
+ .datum(data)
+ .attr("class", "line")
+ .attr("transform", transform)
+ .attr("d", line)
+ .style("fill", "none")
+ .style("stroke", "#CC0000")
+ .style("stroke-width", "2");
+ };
+
+ renders++;
+
+ useEffect(() => {
+ renderSvg();
+ }, [width, height, data]);
+
+ if (!width || !height || !data) {
+ return <></>;
+ }
+
+ return <svg ref={svgRef} width={width} height={height} />;
+});
+
+export default LineChart;
+forwardRef. Now LineChart’s parent is able to work with the related component reference.
+const LineChart = forwardRef(({ data, width, height }, ref) => {
+useImperativeHandle. During some interviews, I ask my interviewees about this hook. I’m surprised because most of them can’t explain it. In my opinion, this hook is as important as basics like useState and useEffect because it makes your code more flexible and performant.
+Here is the exposed code.
+useImperativeHandle(ref, () => ({
+ setMarker: (value) => {
+ if (isNaN(value)) {
+ return;
+ }
+ svg.selectAll(".marker").remove();
+
+ svg
+ .append("svg:line")
+ .attr("transform", transform)
+ .attr("class", "marker")
+ .attr("stroke", "#00ff00")
+ .attr("stroke-linejoin", "round")
+ .attr("stroke-linecap", "round")
+ .attr("stroke-width", 2)
+ .attr("x1", xScale(value))
+ .attr("y1", 200)
+ .attr("x2", xScale(value))
+ .attr("y2", 0);
+ },
+}));
+I moved it from the end of the renderSvg function (see the previous example). Let’s focus on the parent component. Please read the comments there.
+import React, { useState, useEffect, useMemo, useRef } from 'react';
+import LineChart from './LineChart';
+import './style.css';
+
+const data = [
+ // no changes
+];
+
+export default function App() {
+ const [marker, setMarker] = useState(10);
+ // Provide a reference for LineChart
+ const chartRef = useRef();
+
+ useEffect(() => {
+ // If the marker has been changed set it on LineChart directly, see useImperativeHandle
+ chartRef.current.setMarker(marker);
+ }, [marker]);
+
+ useEffect(() => {
+ const intervalId = setInterval(() => {
+ setMarker((prevMarker) => (prevMarker + 10 > 100 ? 10 : prevMarker + 10));
+ }, 1000);
+
+ return () => {
+ clearInterval(intervalId);
+ };
+ }, []);
+
+ const currentTime = new Date().toISOString();
+
+ // There is a trick because we don't need to render LineChart after every App state variable change
+ // As you can see we don't pass the marker here.
+ const chart = useMemo(() => {
+ return <LineChart ref={chartRef} data={data} width={500} height={400} />;
+ }, [data]);
+
+ return (
+ <div id="root-container">
+ <div style={{ marginTop: 20, marginLeft: 20 }}>
+ renders: {renders}
+ <br />
+ start: {timeStart}
+ <br />
+ now: {currentTime}
+ </div>
+ {chart}
+ </div>
+ );
+}
+According to the comments above, there are three points of change. 1. Provide a reference for LineChart. 2. Set the marker directly via useImperativeHandle. Pay attention to the fact that a useImperativeHandle-based call doesn’t cause the component to re-render. It’s super important! 3. Memoise the LineChart component. We don’t need to refresh it with each App state change.
+Finally, the most tricky stuff has remained.
+After an attentive look at the code above, you could ask a question.
+On the one hand, the component shouldn’t be re-rendered now. On the other hand, the guideline moves from point A to point B. Of course, the direct chartRef.current.setMarker(marker) call allows us to set the guideline in the new position. But what approach allows us to remove the previous guideline from point A?
+At the start of the article, I mentioned that we need to take D3 library features into account. In this case, we should know the two facts below.
+D3 objects are stateful, so we can operate them whenever needed. In this context, please look at the following code.
+ let svg;
+
+ const renderSvg = () => {
+ // ...
+ svg = d3.select(svgRef.current);
+ // All future results of modifications will persist in the SVG object
+};
+According to the feature above, we can change the D3 object every time without re-rendering. Moreover, we can manipulate different chart parts via fake CSS classes.
+Look at the following code.
+ setMarker: (value) => {
+ if (isNaN(value)) {
+ return;
+ }
+ svg.selectAll('.marker').remove();
+
+ svg
+ .append('svg:line')
+ .attr('transform', transform)
+ .attr('class', 'marker')
+ .attr('stroke', '#00ff00')
+ .attr('stroke-linejoin', 'round')
+ .attr('stroke-linecap', 'round')
+ .attr('stroke-width', 2)
+ .attr('x1', xScale(value))
+ .attr('y1', 200)
+ .attr('x2', xScale(value))
+ .attr('y2', 0);
+ },
+ }));
+When we add a guideline, we add a special fake class into it:
+.attr('class', 'marker')
+But before that, we remove the previous guideline via
+svg.selectAll('.marker').remove();
+That’s all for today about the secrets of D3.
+It’s time to run the final example! You can play with the complete final example here.
+Only two renders in total. Looks cool! That’s like music to the ears of a React developer!
+PS: If you are wondering why two renders, please read about React Strict Mode.
+