diff --git a/.eslintrc.js b/.eslintrc.js deleted file mode 100644 index 5bf8f8c623..0000000000 --- a/.eslintrc.js +++ /dev/null @@ -1,18 +0,0 @@ -module.exports = { - extends: '@senecacdot/eslint-config-telescope', - - overrides: [ - { - files: ['./src/backend/**/*.js', './test/**/*.js'], - rules: { - 'jest/no-standalone-expect': 'off', - 'no-unused-vars': 'error', - }, - env: { - node: true, - commonjs: true, - jest: true, - }, - }, - ], -}; diff --git a/.github/workflows/docker-build-and-push.yml b/.github/workflows/docker-build-and-push.yml index c9563dcdee..d04933b6cf 100644 --- a/.github/workflows/docker-build-and-push.yml +++ b/.github/workflows/docker-build-and-push.yml @@ -12,11 +12,6 @@ on: required: false default: 'https://dev.telescope.cdot.systems' type: string - api_url: - description: 'The microservice gateway URL (defaults to staging)' - required: false - default: 'https://dev.telescope.cdot.systems' - type: string image_url: description: 'The image microservice URL (defaults to staging)' required: false @@ -88,7 +83,6 @@ jobs: image: nginx build-args: | WEB_URL=${{ inputs.web_url }} - API_URL=${{ inputs.api_url }} IMAGE_URL=${{ inputs.image_url }} SSO_URL=${{ inputs.sso_url }} POSTS_URL=${{ inputs.posts_url }} diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 04592af0b1..95c9c019ae 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -15,7 +15,6 @@ jobs: with: deploy_tag: production web_url: 'https://telescope.cdot.systems' - api_url: 'https://telescope.cdot.systems' image_url: 'https://api.telescope.cdot.systems/v1/image' sso_url: 'https://api.telescope.cdot.systems/v1/auth' posts_url: 'https://api.telescope.cdot.systems/v1/posts' diff --git a/.gitpod.yml b/.gitpod.yml index ed9dbdb9eb..66f00f2d46 100644 --- a/.gitpod.yml +++ b/.gitpod.yml @@ -6,7 +6,6 @@ tasks: openMode: split-left # Copying env.development and replacing 'localhost' with Gitpod urls before: | - eval $(gp env -e API_URL=$(gp url 3000)) eval $(gp env -e WEB_URL=$(gp url 8000)) eval $(gp env -e API_HOST=$(gp url 8443)) eval $(gp env -e SSO_LOGIN_URL=$(gp url 8081)) @@ -15,7 +14,6 @@ tasks: sed -r \ -e "s@(.+=)http://localhost:8000(/[^ ]*)*@\1$WEB_URL\2@g" \ - -e "s@(.+=)http://localhost:3000(/[^ ]*)*@\1$API_URL\2@g" \ -e "s@(.+=)http://localhost:8081(/[^ ]*)*@\1$SSO_LOGIN_URL\2@g" \ -e "s@(.+=)http://kong:8000(/[^ ]*)*@\1$SUPABASE_API\2@g" \ -e "s@(.+=)http://localhost([^:]*)@\1$API_HOST\2@g" \ diff --git a/.vscode/launch.json b/.vscode/launch.json index 701ccc0b02..980af28769 100644 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -1,14 +1,6 @@ { "version": "0.2.0", "configurations": [ - { - "type": "node", - "request": "launch", - "name": "Launch Telescope", - "program": "${workspaceFolder}/src/backend/index.js", - "envFile": "${workspaceRoot}/.env", - "skipFiles": ["/**"] - }, { "type": "node", "request": "launch", diff --git a/Dockerfile b/Dockerfile deleted file mode 100644 index 6cef50c1f8..0000000000 --- a/Dockerfile +++ /dev/null @@ -1,53 +0,0 @@ -# This Docker file is used for -# `development`, `test`, and `staging` enviornments -# -# CLI command to run this file: script="some-name" docker-compose up --build -# `some-name` is one of the names provided under `scripts` tag in `package.json` file -# example: $ script="test" docker-compose up --build --> will run telescope with `test` script -# default: $ docker-compose up --build --> will run telescope with `start` script - - -# Dockerfile -# -# ------------------------------------- - -# 
NEXT_PUBLIC_API_URL is needed by the next.js build, which we define -# as a build ARG in API_URL - -# Context: Build Context -FROM node:16 as build - -# Set Working Directory Context -WORKDIR "/telescope" - -# Copy package.jsons for each service -COPY package.json . - -# ------------------------------------- -# Context: Dependencies -FROM build AS backend_dependencies - -# Install Production Modules! -# Disable postinstall hook in this case since we are being explict with installs -# `postinstall` typically goes on to install front-end and autodeployment modules -# which though is logical for local development, breaks docker container caching trick. -RUN npm install --only=production --no-package-lock --ignore-scripts - -# ------------------------------------- -# Context: Release -FROM node:16-alpine3.15 AS release - -# GET production code from previous containers -COPY --from=backend_dependencies /telescope/node_modules /telescope/node_modules -COPY ./src/backend ./src/backend -COPY package.json . - -# Directory for log files -RUN mkdir /log - -# Environment variable with default value -ENV script=start - -# Running telescope when the image gets built using a script -# `script` is one of the scripts from `package.json`, passed to the image -CMD ["sh", "-c", "npm run ${script}"] diff --git a/config/env.development b/config/env.development index b3238853fa..ea1bbb3992 100644 --- a/config/env.development +++ b/config/env.development @@ -188,38 +188,12 @@ DEPENDENCY_DISCOVERY_URL=http://localhost/v1/dependency-discovery RSS_BRIDGE_URL=http://localhost/v1/rss-bridge ################################################################################ -# Telescope 1.0 Legacy Environment +# Environment ################################################################################ # NODE_ENV should be one of "development" or "production" NODE_ENV=development -# PORT is the port used by the web server -PORT=3000 - -# API_URL is the URL of the Telescope Web API server. Change this to -# pick which backend server our frontend uses. If you are developing -# locally, this will be localhost:{PORT}, probably http://localhost:3000. -# Other possible values include: -# -# (local development) -# API_URL=http://localhost:3000 -# (our staging server) -# API_URL=https://dev.telescope.cdot.systems -# (our production server) -# API_URL=https://telescope.cdot.systems -API_URL=http://localhost:3000 - -# PROXY_FRONTEND=1 will allow proxying the Next dev server (http://localhost:8000) -# through our node server (http://localhost:3000). Useful for testing locally. -# To run: -# -# 1. set PROXY_FRONTEND=1 in the .env -# 2. run the web server: `pnpm start` -# 3. run the Next dev server: `pnpm develop` -# 4. open http://localhost:3000/ and you'll get content from http://localhost:8000 -PROXY_FRONTEND= - # LOG_LEVEL is used to set the level of debugging for the logs. # info, error and debug are commonly used levels. See http://getpino.io/#/docs/api?id=level for more info on levels. # to completely disable all logs, use silent. diff --git a/config/env.production b/config/env.production index f3540c3f7a..5ba0ea65cb 100644 --- a/config/env.production +++ b/config/env.production @@ -190,11 +190,6 @@ RSS_BRIDGE_URL=https://api.telescope.cdot.systems/v1/rss-bridge # NODE_ENV should be one of "development" or "production" NODE_ENV=production -# PORT is the port used by the web server -PORT=3000 - -API_URL=https://telescope.cdot.systems - # LOG_LEVEL is used to set the level of debugging for the logs. 
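LOG_LEVEL is consumed by the pino-based logger. A minimal sketch of the assumed wiring — the logger module itself is not part of this diff, so reading `process.env.LOG_LEVEL` directly is an assumption:

```js
// Hedged sketch: a pino logger that honours LOG_LEVEL, defaulting to "info".
// Assumption: the logger module reads process.env.LOG_LEVEL (not shown in this diff).
const pino = require('pino');

const logger = pino({ level: process.env.LOG_LEVEL || 'info' });
logger.debug('visible only when LOG_LEVEL is debug or lower (e.g. trace)');
logger.info('visible at the default level');
```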
# info, error and debug are commonly used levels. See http://getpino.io/#/docs/api?id=level for more info on levels. # to completely disable all logs, use silent. diff --git a/config/env.staging b/config/env.staging index 4e059ac13a..bc8b26ee60 100644 --- a/config/env.staging +++ b/config/env.staging @@ -186,17 +186,12 @@ DEPENDENCY_DISCOVERY_URL=https://dev.api.telescope.cdot.systems/v1/dependency-di RSS_BRIDGE_URL=https://dev.api.telescope.cdot.systems/v1/rss-bridge ################################################################################ -# Telescope 1.0 Legacy Environment +# Environment ################################################################################ # NODE_ENV should be one of "development" or "production" NODE_ENV=production -# PORT is the port used by the web server -PORT=3000 - -API_URL=https://dev.telescope.cdot.systems - # LOG_LEVEL is used to set the level of debugging for the logs. # info, error and debug are commonly used levels. See http://getpino.io/#/docs/api?id=level for more info on levels. # to completely disable all logs, use silent. diff --git a/docker/development.yml b/docker/development.yml index b49f6c0c9a..a6323934e4 100644 --- a/docker/development.yml +++ b/docker/development.yml @@ -78,10 +78,9 @@ services: cache_from: - docker.cdot.systems/parser:buildcache environment: - # In development and testing, the SSO service needs to contact the Supabase + # In development and testing, the Parser service needs to contact the Supabase # service directly via Docker vs through the http://localhost/v1/supabase domain. - # Using staging database - - SUPABASE_URL=https://dev.api.telescope.cdot.systems/v1/supabase + - SUPABASE_URL=http://kong:8000 depends_on: - elasticsearch - traefik diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml index ea55e6d78c..b85f1d8c82 100644 --- a/docker/docker-compose.yml +++ b/docker/docker-compose.yml @@ -6,7 +6,7 @@ version: '3' services: ############################################################################## - # Telescope 2.0 Microservices + # Telescope Microservices ############################################################################## # status service @@ -265,9 +265,7 @@ services: # next.js needs build-time access to a number of API URL values, forward as ARGs args: # Web front-end URL - - WEB_URL - # Telescope 1.0 API URL - - API_URL + - WEB_URL=${WEB_URL} # Telescope 2.0 Microservice URLs - IMAGE_URL - SSO_URL diff --git a/docker/production.yml b/docker/production.yml index 241f0d74bb..f54d50719d 100644 --- a/docker/production.yml +++ b/docker/production.yml @@ -13,57 +13,7 @@ version: '3' services: ############################################################################## - # Telescope 1.0: Front-end and Legacy (pre-microservices) Back-end - ############################################################################## - telescope: - build: - context: .. 
- dockerfile: Dockerfile - container_name: 'telescope' - restart: unless-stopped - environment: - # a place-holder for the variable passed by CLI with a default value - # the passed or default value is a command to run `telescope` - - script=${script:-start} - # override the .env variable for `REDIS_URL` - - REDIS_URL=redis://redis:6379 - # variables for elasticsearch - - ES_HOST=elasticsearch - - ELASTIC_PORT=9200 - - ELASTIC_URL=http://elasticsearch - # Environment values we expect from our legacy .env file - - NODE_ENV=production - - PORT - - POSTS_URL - - API_URL - - SSO_URL - - WEB_URL - - SEARCH_URL - - FEED_DISCOVERY_URL - - LOG_LEVEL - - FEED_URL - - FEED_URL_INTERVAL_MS - - ELASTIC_MAX_RESULTS_PER_PAGE - - ELASTIC_DELAY_MS - - SSO_LOGIN_URL - - SSO_LOGIN_CALLBACK_URL - - SLO_LOGOUT_URL - - SLO_LOGOUT_CALLBACK_URL - - SSO_IDP_PUBLIC_KEY_CERT - - SAML_ENTITY_ID - - ADMINISTRATORS - - FEED_PROCESSING_DELAY_SEC - - FEED_QUEUE_ATTEMPTS - - FEED_QUEUE_DELAY_MS - - FEED_QUEUE_PARALLEL_WORKERS - - MAX_POSTS_PER_PAGE - - GIT_COMMIT - depends_on: - - redis - - elasticsearch - - ############################################################################## - # Telescope 2.0 Microservices + # Telescope Microservices ############################################################################## # status service diff --git a/jest.config.js b/jest.config.js index e037525c15..ca4c4cf564 100644 --- a/jest.config.js +++ b/jest.config.js @@ -5,8 +5,6 @@ module.exports = { projects: [ // Our front-end '/src/web/app/jest.config.js', - // Our legacy backend tests, slowly being migrated to microservices - '/test/jest.config.js', // Satellite '/src/satellite/jest.config.js', // Our current microservices, we're missing a few that needs tests still diff --git a/package.json b/package.json index a17ef688c7..0661ec00c4 100644 --- a/package.json +++ b/package.json @@ -13,9 +13,6 @@ "build": "pnpm turbo run build", "dev": "pnpm dev --prefix src/web/app --", "develop": "pnpm dev", - "eslint": "eslint --config .eslintrc.js --ignore-path .gitignore \"./src/backend/**/*.js\" \"./test/**/*.js\"", - "eslint-time": "TIMING=1 eslint --config .eslintrc.js --ignore-path .gitignore \"./src/backend/**/*.js\" \"./test/**/*.js\"", - "eslint-fix": "eslint --config .eslintrc.js --ignore-path .gitignore \"./src/backend/**/*.js\" \"./test/**/*.js\" --fix", "lint": "pnpm turbo run lint && pnpm eslint", "lint-time": "pnpm turbo run lint-time && pnpm eslint-time", "clean": "pnpm turbo run clean && pnpm -r exec rm -rf node_modules", @@ -29,8 +26,6 @@ "jest-watch": "jest -c jest.config.js --watch --", "jest-update": "jest -c jest.config.js --updateSnapshot --", "jest:e2e-watch": "jest -c jest.config.e2e.js --watch --", - "start": "node src/backend", - "server": "node src/backend/web/server", "test-ci": "run-s prettier-check test", "pre-commit": "pretty-quick --staged", "preversion": "node tools/collect-dependencies.js -- -o src/api/dependency-discovery/deps.txt && git add src/api/dependency-discovery/deps.txt", @@ -51,46 +46,9 @@ }, "homepage": "https://github.com/Seneca-CDOT/telescope#readme", "dependencies": { - "@bull-board/api": "3.10.4", - "@bull-board/express": "3.10.4", - "@elastic/elasticsearch": "7.16.0", - "@elastic/elasticsearch-mock": "0.3.1", - "@wordpress/wordcount": "2.15.2", "babel-jest": "27.5.1", - "bull": "3.29.3", - "clean-whitespace": "0.1.2", - "connect-redis": "6.1.3", - "cors": "2.8.5", - "date-fns": "2.28.0", "docker-compose": "0.23.17", - "dotenv": "10.0.0", - "entities": "3.0.1", - "express": "4.17.3", 
- "express-healthcheck": "0.1.0", - "express-session": "1.17.2", - "express-validator": "6.14.0", - "feed": "4.2.2", - "helmet": "4.6.0", - "highlight.js": "11.4.0", - "http-proxy-middleware": "2.0.4", - "ioredis": "4.28.5", - "ioredis-mock": "5.9.1", - "jsdom": "18.1.1", - "linkify-html": "3.0.5", - "linkifyjs": "3.0.5", - "node-fetch": "2.6.7", - "normalize-url": "6.1.0", - "opml-generator": "1.1.1", - "passport": "0.5.2", - "passport-saml": "3.2.1", - "pino": "7.8.0", - "pino-elasticsearch": "6.2.0", - "pino-http": "6.6.0", - "pino-pretty": "7.5.3", - "rss-parser": "3.12.0", - "sanitize-html": "2.6.1", - "set-interval-async": "2.0.3", - "stoppable": "1.1.0" + "dotenv": "10.0.0" }, "devDependencies": { "@babel/core": "7.17.9", @@ -100,11 +58,9 @@ "@babel/preset-typescript": "7.16.7", "@parcel/packager-ts": "2.4.1", "@senecacdot/eslint-config-telescope": "1.1.0", - "@types/jest": "27.4.1", "@vercel/node": "1.13.0", - "babel-preset-next": "1.4.0", + "@types/jest": "27.4.1", "eslint": "7.32.0", - "fast-xml-parser": "3.21.1", "husky": "7.0.4", "jest": "27.5.1", "jest-circus": "27.5.1", @@ -113,12 +69,10 @@ "jest-playwright-preset": "1.7.0", "jest-runner": "27.5.1", "nock": "13.2.4", - "npm-run-all": "4.1.5", "playwright": "1.20.2", "prettier": "2.5.1", "pretty-quick": "3.1.3", "redis-commands": "1.7.0", - "run.env": "1.1.0", "supertest": "6.1.6", "ts-jest": "27.1.3", "turbo": "1.2.2", diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index bf0c090ade..47eb74efbf 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -17,112 +17,34 @@ importers: '@babel/preset-env': 7.16.11 '@babel/preset-react': 7.16.7 '@babel/preset-typescript': 7.16.7 - '@bull-board/api': 3.10.4 - '@bull-board/express': 3.10.4 - '@elastic/elasticsearch': 7.16.0 - '@elastic/elasticsearch-mock': 0.3.1 '@parcel/packager-ts': 2.4.1 '@senecacdot/eslint-config-telescope': 1.1.0 '@types/jest': 27.4.1 '@vercel/node': 1.13.0 - '@wordpress/wordcount': 2.15.2 babel-jest: 27.5.1 - babel-preset-next: 1.4.0 - bull: 3.29.3 - clean-whitespace: 0.1.2 - connect-redis: 6.1.3 - cors: 2.8.5 - date-fns: 2.28.0 docker-compose: 0.23.17 dotenv: 10.0.0 - entities: 3.0.1 eslint: 7.32.0 - express: 4.17.3 - express-healthcheck: 0.1.0 - express-session: 1.17.2 - express-validator: 6.14.0 - fast-xml-parser: 3.21.1 - feed: 4.2.2 - helmet: 4.6.0 - highlight.js: 11.4.0 - http-proxy-middleware: 2.0.4 husky: 7.0.4 - ioredis: 4.28.5 - ioredis-mock: 5.9.1 jest: 27.5.1 jest-circus: 27.5.1 jest-environment-node: 27.5.1 jest-fetch-mock: 3.0.3 jest-playwright-preset: 1.7.0 jest-runner: 27.5.1 - jsdom: 18.1.1 - linkify-html: 3.0.5 - linkifyjs: 3.0.5 nock: 13.2.4 - node-fetch: 2.6.7 - normalize-url: 6.1.0 - npm-run-all: 4.1.5 - opml-generator: 1.1.1 - passport: 0.5.2 - passport-saml: 3.2.1 - pino: 7.8.0 - pino-elasticsearch: 6.2.0 - pino-http: 6.6.0 - pino-pretty: 7.5.3 playwright: 1.20.2 prettier: 2.5.1 pretty-quick: 3.1.3 redis-commands: 1.7.0 - rss-parser: 3.12.0 - run.env: 1.1.0 - sanitize-html: 2.6.1 - set-interval-async: 2.0.3 - stoppable: 1.1.0 supertest: 6.1.6 ts-jest: 27.1.3 turbo: 1.2.2 typescript: 4.4.4 dependencies: - '@bull-board/api': 3.10.4 - '@bull-board/express': 3.10.4 - '@elastic/elasticsearch': 7.16.0 - '@elastic/elasticsearch-mock': 0.3.1 - '@wordpress/wordcount': 2.15.2 babel-jest: 27.5.1_@babel+core@7.17.9 - bull: 3.29.3 - clean-whitespace: 0.1.2 - connect-redis: 6.1.3 - cors: 2.8.5 - date-fns: 2.28.0 docker-compose: 0.23.17 dotenv: 10.0.0 - entities: 3.0.1 - express: 4.17.3 - express-healthcheck: 0.1.0 - express-session: 1.17.2 - express-validator: 
6.14.0 - feed: 4.2.2 - helmet: 4.6.0 - highlight.js: 11.4.0 - http-proxy-middleware: 2.0.4 - ioredis: 4.28.5 - ioredis-mock: 5.9.1_ba8a89de17b1bb8d40d8173f0d50f179 - jsdom: 18.1.1 - linkify-html: 3.0.5_linkifyjs@3.0.5 - linkifyjs: 3.0.5 - node-fetch: 2.6.7 - normalize-url: 6.1.0 - opml-generator: 1.1.1 - passport: 0.5.2 - passport-saml: 3.2.1 - pino: 7.8.0 - pino-elasticsearch: 6.2.0 - pino-http: 6.6.0 - pino-pretty: 7.5.3 - rss-parser: 3.12.0 - sanitize-html: 2.6.1 - set-interval-async: 2.0.3 - stoppable: 1.1.0 devDependencies: '@babel/core': 7.17.9 '@babel/plugin-transform-runtime': 7.17.0_@babel+core@7.17.9 @@ -133,9 +55,7 @@ importers: '@senecacdot/eslint-config-telescope': 1.1.0_eslint@7.32.0 '@types/jest': 27.4.1 '@vercel/node': 1.13.0 - babel-preset-next: 1.4.0_@babel+core@7.17.9 eslint: 7.32.0 - fast-xml-parser: 3.21.1 husky: 7.0.4 jest: 27.5.1 jest-circus: 27.5.1 @@ -144,12 +64,10 @@ importers: jest-playwright-preset: 1.7.0_5ef5350c5949a17a4231bb9b0d6390e9 jest-runner: 27.5.1 nock: 13.2.4 - npm-run-all: 4.1.5 playwright: 1.20.2 prettier: 2.5.1 pretty-quick: 3.1.3_prettier@2.5.1 redis-commands: 1.7.0 - run.env: 1.1.0 supertest: 6.1.6 ts-jest: 27.1.3_87e591b3dc93edb8ac6ecf76c63bd1d2 turbo: 1.2.2 @@ -1427,20 +1345,6 @@ packages: - supports-color dev: true - /@babel/plugin-proposal-decorators/7.16.7_@babel+core@7.17.9: - resolution: {integrity: sha512-DoEpnuXK14XV9btI1k8tzNGCutMclpj4yru8aXKoHlVmbO1s+2A+g2+h4JhcjrxkFJqzbymnLG6j/niOf3iFXQ==} - engines: {node: '>=6.9.0'} - peerDependencies: - '@babel/core': ^7.0.0-0 - dependencies: - '@babel/core': 7.17.9 - '@babel/helper-create-class-features-plugin': 7.16.10_@babel+core@7.17.9 - '@babel/helper-plugin-utils': 7.16.7 - '@babel/plugin-syntax-decorators': 7.16.7_@babel+core@7.17.9 - transitivePeerDependencies: - - supports-color - dev: true - /@babel/plugin-proposal-dynamic-import/7.16.7_@babel+core@7.17.8: resolution: {integrity: sha512-I8SW9Ho3/8DRSdmDdH3gORdyUuYnk1m4cMxUAdu5oy4n3OfN8flDEH+d60iG7dUfi0KkYwSvoalHzzdRzpWHTg==} engines: {node: '>=6.9.0'} @@ -1798,16 +1702,6 @@ packages: '@babel/helper-plugin-utils': 7.16.7 dev: true - /@babel/plugin-syntax-decorators/7.16.7_@babel+core@7.17.9: - resolution: {integrity: sha512-vQ+PxL+srA7g6Rx6I1e15m55gftknl2X8GCUW1JTlkTaXZLJOS0UcaY0eK9jYT7IYf4awn6qwyghVHLDz1WyMw==} - engines: {node: '>=6.9.0'} - peerDependencies: - '@babel/core': ^7.0.0-0 - dependencies: - '@babel/core': 7.17.9 - '@babel/helper-plugin-utils': 7.16.7 - dev: true - /@babel/plugin-syntax-dynamic-import/7.8.3_@babel+core@7.17.8: resolution: {integrity: sha512-5gdGbFon+PszYzqs83S3E5mpi7/y/8M9eC90MRTZfduQOYW76ig6SOSPNe41IG5LoP3FGBn2N0RjVDSQiS94kQ==} peerDependencies: @@ -3241,12 +3135,6 @@ packages: dependencies: regenerator-runtime: 0.13.9 - /@babel/runtime/7.5.0: - resolution: {integrity: sha512-2xsuyZ0R0RBFwjgae5NpXk8FcfH4qovj5cEM5VEeB7KXnKqzaisIu2HSV/mCEISolJJuR4wkViUGYujA8MH9tw==} - dependencies: - regenerator-runtime: 0.13.9 - dev: false - /@babel/template/7.16.7: resolution: {integrity: sha512-I8j/x8kHUrbYRTUxXrrMbfCa7jxkE7tZre39x3kjr9hvI82cK1FfqLygotcWN5kdPGWcLdWMHpSBavse5tWw3w==} engines: {node: '>=6.9.0'} @@ -4011,18 +3899,6 @@ packages: into-stream: 6.0.0 dev: false - /@elastic/elasticsearch/7.16.0: - resolution: {integrity: sha512-lMY2MFZZFG3om7QNHninxZZOXYx3NdIUwEISZxqaI9dXPoL3DNhU31keqjvx1gN6T74lGXAzrRNP4ag8CJ/VXw==} - engines: {node: '>=12'} - dependencies: - debug: 4.3.3 - hpagent: 0.1.2 - ms: 2.1.3 - secure-json-parse: 2.4.0 - transitivePeerDependencies: - - supports-color - dev: false - 
/@elastic/elasticsearch/7.17.0: resolution: {integrity: sha512-5QLPCjd0uLmLj1lSuKSThjNpq39f6NmlTy9ROLFwG5gjyTgpwSqufDeYG/Fm43Xs05uF7WcscoO7eguI3HuuYA==} engines: {node: '>=12'} @@ -4369,7 +4245,7 @@ packages: engines: {node: ^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0} dependencies: callsites: 3.1.0 - graceful-fs: 4.2.9 + graceful-fs: 4.2.10 source-map: 0.6.1 dev: true @@ -7265,13 +7141,6 @@ packages: '@xtuc/long': 4.2.2 dev: true - /@wordpress/wordcount/2.15.2: - resolution: {integrity: sha512-y7dltZQrdtUatzpDVpZxNfXeDva4xRw30lO57MkxmeqlWOpZCrgCK7czNbebTC1CUXZ9xbKiOrNdnFgE6CnoOw==} - dependencies: - '@babel/runtime': 7.16.7 - lodash: 4.17.21 - dev: false - /@xmldom/xmldom/0.7.5: resolution: {integrity: sha512-V3BIhmY36fXZ1OtVcI9W+FxQqxVLsPKcNjWigIaa81dLC9IolJl5Mt4Cvhmr0flUnjSpTdrbMTSbXqYqV5dT6A==} engines: {node: '>=10.0.0'} @@ -7752,7 +7621,7 @@ packages: /axios/0.25.0: resolution: {integrity: sha512-cD8FOb0tRH3uuEe6+evtAbgJtfxr7ly3fQjYcMcuPlgkwVS9xboaVIpcDV+cYQe+yGykgwZCs1pzjntcGa6l5g==} dependencies: - follow-redirects: 1.14.7 + follow-redirects: 1.14.7_debug@4.3.3 transitivePeerDependencies: - debug dev: false @@ -7936,10 +7805,6 @@ packages: - supports-color dev: true - /babel-plugin-react-require/3.1.3: - resolution: {integrity: sha512-kDXhW2iPTL81x4Ye2aUMdEXQ56JP0sBJmRQRXJPH5FsNB7fOc/YCsHTqHv8IovPyw9Rk07gdd7MVUz8tUmRBCA==} - dev: true - /babel-preset-current-node-syntax/1.0.1_@babel+core@7.17.9: resolution: {integrity: sha512-M7LQ0bxarkxQoN+vz5aJPsLBn77n8QgTFmo8WK0/44auK2xlCXrYcUxHFxgU7qW5Yzw/CjmLRK2uJzaCd7LvqQ==} peerDependencies: @@ -7969,20 +7834,6 @@ packages: babel-plugin-jest-hoist: 27.5.1 babel-preset-current-node-syntax: 1.0.1_@babel+core@7.17.9 - /babel-preset-next/1.4.0_@babel+core@7.17.9: - resolution: {integrity: sha512-+86BkjiV3eGyXg61QLE33KCtXna/ZGoYxGDFhtr9Nqd2tdt+gLIqQrz0NXtZW2vU5RMVISqZEKhrxPK/tFOWEg==} - dependencies: - '@babel/plugin-proposal-decorators': 7.16.7_@babel+core@7.17.9 - '@babel/plugin-syntax-dynamic-import': 7.8.3_@babel+core@7.17.9 - '@babel/plugin-transform-runtime': 7.17.0_@babel+core@7.17.9 - '@babel/preset-env': 7.16.11_@babel+core@7.17.9 - '@babel/preset-react': 7.16.7_@babel+core@7.17.9 - babel-plugin-react-require: 3.1.3 - transitivePeerDependencies: - - '@babel/core' - - supports-color - dev: true - /bail/1.0.5: resolution: {integrity: sha512-xFbRxM1tahm08yHBP16MMjVUAvDaBMD38zsM9EMAUN61omwLmKlOpB/Zku5QkjZ8TZ4vn53pj+t518cH0S03RQ==} dev: false @@ -10050,10 +9901,6 @@ packages: engines: {node: '>=10'} dev: false - /dotenv/2.0.0: - resolution: {integrity: sha1-vXWcNXqqcDZeAclrewvsCKbg2Uk=} - dev: true - /dotenv/7.0.0: resolution: {integrity: sha512-M3NhsLbV1i6HuGzBUH8vXrtxOk+tWmzWKDMbAVSUp3Zsjm7ywFeuwrUXhmhQyRK1q5B5GGy7hcXPbj3bnfZg2g==} engines: {node: '>=6'} @@ -10971,10 +10818,6 @@ packages: handlebars: 4.7.7 dev: false - /express-healthcheck/0.1.0: - resolution: {integrity: sha1-yr7HgSnEy5DNf7iU364huC4nywc=} - dev: false - /express-jwt/6.1.1: resolution: {integrity: sha512-m8gkY04v5jtiFZn6bYQINYX/DVXq1DVb5nIW7H8l87qJ4BBvtQKFRpxyRE31odct7OPfHdT+B8678zJHhlMrpw==} engines: {node: '>= 8.0.0'} @@ -11127,13 +10970,6 @@ packages: punycode: 1.4.1 dev: false - /fast-xml-parser/3.21.1: - resolution: {integrity: sha512-FTFVjYoBOZTJekiUsawGsSYV9QL0A+zDYCRj7y34IO6Jg+2IMYEtQa+bbictpdpV8dHxXywqU7C0gRDEOFtBFg==} - hasBin: true - dependencies: - strnum: 1.0.5 - dev: true - /fastest-stable-stringify/2.0.2: resolution: {integrity: sha512-bijHueCGd0LqqNK9b5oCMHc0MluJAx0cwqASgbWMvkO01lCYgIhacVRLcaDz3QnyYIRNJRDwMb41VuT6pHJ91Q==} dev: false @@ -11380,6 +11216,7 @@ 
packages: peerDependenciesMeta: debug: optional: true + dev: true /follow-redirects/1.14.7_debug@4.3.3: resolution: {integrity: sha512-+hbxoLbFMbRKDwohX8GkTataGqO6Jb7jGwpAlwgy2bIz25XtRm7KEzJM76R1WiNT5SwZkX4Y75SwBolkpmE7iQ==} @@ -11998,11 +11835,6 @@ packages: hasBin: true dev: false - /helmet/4.6.0: - resolution: {integrity: sha512-HVqALKZlR95ROkrnesdhbbZJFi/rIVSoNq6f3jA/9u6MIbTsPh3xZwihjeI5+DO/2sOV6HMHooXcEOuwskHpTg==} - engines: {node: '>=10.0.0'} - dev: false - /helmet/5.0.2: resolution: {integrity: sha512-QWlwUZZ8BtlvwYVTSDTBChGf8EOcQ2LkGMnQJxSzD1mUu8CCjXJZq/BXP8eWw4kikRnzlhtYo3lCk0ucmYA3Vg==} engines: {node: '>=12.0.0'} @@ -12254,24 +12086,6 @@ packages: transitivePeerDependencies: - supports-color - /http-proxy-middleware/2.0.4: - resolution: {integrity: sha512-m/4FxX17SUvz4lJ5WPXOHDUuCwIqXLfLHs1s0uZ3oYjhoXlx9csYxaOa0ElDEJ+h8Q4iJ1s+lTMbiCa4EXIJqg==} - engines: {node: '>=12.0.0'} - peerDependencies: - '@types/express': ^4.17.13 - peerDependenciesMeta: - '@types/express': - optional: true - dependencies: - '@types/http-proxy': 1.17.8 - http-proxy: 1.18.1 - is-glob: 4.0.3 - is-plain-obj: 3.0.0 - micromatch: 4.0.4 - transitivePeerDependencies: - - debug - dev: false - /http-proxy-middleware/2.0.4_@types+express@4.17.13: resolution: {integrity: sha512-m/4FxX17SUvz4lJ5WPXOHDUuCwIqXLfLHs1s0uZ3oYjhoXlx9csYxaOa0ElDEJ+h8Q4iJ1s+lTMbiCa4EXIJqg==} engines: {node: '>=12.0.0'} @@ -12296,7 +12110,7 @@ packages: engines: {node: '>=8.0.0'} dependencies: eventemitter3: 4.0.7 - follow-redirects: 1.14.7 + follow-redirects: 1.14.7_debug@4.3.3 requires-port: 1.0.0 transitivePeerDependencies: - debug @@ -13163,7 +12977,7 @@ packages: '@types/node': 17.0.12 anymatch: 3.1.2 fb-watchman: 2.0.1 - graceful-fs: 4.2.9 + graceful-fs: 4.2.10 jest-regex-util: 27.5.1 jest-serializer: 27.5.1 jest-util: 27.5.1 @@ -13234,7 +13048,7 @@ packages: '@jest/types': 27.5.1 '@types/stack-utils': 2.0.1 chalk: 4.1.2 - graceful-fs: 4.2.9 + graceful-fs: 4.2.10 micromatch: 4.0.4 pretty-format: 27.5.1 slash: 3.0.0 @@ -13325,7 +13139,7 @@ packages: dependencies: '@jest/types': 27.5.1 chalk: 4.1.2 - graceful-fs: 4.2.9 + graceful-fs: 4.2.10 jest-haste-map: 27.5.1 jest-pnp-resolver: 1.2.2_jest-resolve@27.5.1 jest-util: 27.5.1 @@ -13383,7 +13197,7 @@ packages: collect-v8-coverage: 1.0.1 execa: 5.1.1 glob: 7.2.0 - graceful-fs: 4.2.9 + graceful-fs: 4.2.10 jest-haste-map: 27.5.1 jest-message-util: 27.5.1 jest-mock: 27.5.1 @@ -13402,7 +13216,7 @@ packages: engines: {node: ^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0} dependencies: '@types/node': 17.0.12 - graceful-fs: 4.2.9 + graceful-fs: 4.2.10 /jest-snapshot/27.5.1: resolution: {integrity: sha512-yYykXI5a0I31xX67mgeLw1DZ0bJB+gpq5IpSuCAoyDi0+BhgU/RIrL+RTzDmkNTchvDFWKP8lp+w/42Z3us5sA==} @@ -13420,7 +13234,7 @@ packages: babel-preset-current-node-syntax: 1.0.1_@babel+core@7.17.9 chalk: 4.1.2 expect: 27.5.1 - graceful-fs: 4.2.9 + graceful-fs: 4.2.10 jest-diff: 27.5.1 jest-get-type: 27.5.1 jest-haste-map: 27.5.1 @@ -13442,7 +13256,7 @@ packages: '@types/node': 17.0.12 chalk: 4.1.2 ci-info: 3.3.0 - graceful-fs: 4.2.9 + graceful-fs: 4.2.10 picomatch: 2.3.1 dev: true @@ -13454,7 +13268,7 @@ packages: '@types/node': 17.0.12 chalk: 4.1.2 ci-info: 3.3.0 - graceful-fs: 4.2.9 + graceful-fs: 4.2.10 picomatch: 2.3.1 /jest-validate/27.5.1: @@ -15115,12 +14929,6 @@ packages: hasBin: true dev: false - /opml-generator/1.1.1: - resolution: {integrity: sha1-1mzUFtAr6GB9FeuTGH2GwspZG84=} - dependencies: - xml: 1.0.1 - dev: false - /optionator/0.8.3: resolution: {integrity: 
sha512-+IW9pACdk3XWmmTXG8m3upGUJst5XRGzxMRjXzAuJ1XnIFNvfhjjIuYkDvysnPQ7qzqVzLt78BCruntqRhWQbA==} engines: {node: '>= 0.8.0'} @@ -15303,7 +15111,7 @@ packages: resolution: {integrity: sha512-whdkPIooSu/bASggZ96BWVvZTRMOFxnyUG5PnTSGKoJE2gd5mbVNmR2Nj20QFzxYYgAXpoqC+AiXzl+UMRh7zQ==} engines: {node: '>=8'} dependencies: - graceful-fs: 4.2.9 + graceful-fs: 4.2.10 hasha: 5.2.2 lodash.flattendeep: 4.4.0 release-zalgo: 1.0.0 @@ -15663,20 +15471,6 @@ packages: split2: 4.1.0 dev: false - /pino-elasticsearch/6.2.0: - resolution: {integrity: sha512-kGjgRK84GBO9HU6d9Wnm2v/XY0rxWB/olPEQPHg54KiFiFpfN112jB7sqda8PeMCzgf5KaJKdZncP04sok+Tmg==} - engines: {node: '>=10'} - hasBin: true - dependencies: - '@elastic/elasticsearch': 7.16.0 - minimist: 1.2.5 - pump: 3.0.0 - readable-stream: 3.6.0 - split2: 3.2.2 - transitivePeerDependencies: - - supports-color - dev: false - /pino-http/6.6.0: resolution: {integrity: sha512-PlItaK2MLpoIMLEcClhfb1VQk/o6fKppINl5s6sPE/4rvufkdO3kCSs/92EwrBsB1yssRCQqDV+w1xpYuPVnjg==} dependencies: @@ -16591,7 +16385,7 @@ packages: /proper-lockfile/4.1.2: resolution: {integrity: sha512-TjNPblN4BwAWMXU8s9AEz4JmQxnD1NNL7bNOY/AKUzyamc379FWASUhc/K1pL2noVb+XmZKLL68cjzLsiOAMaA==} dependencies: - graceful-fs: 4.2.9 + graceful-fs: 4.2.10 retry: 0.12.0 signal-exit: 3.0.6 dev: true @@ -17569,14 +17363,6 @@ packages: resolution: {integrity: sha512-Arc4hUN896vjkqCYrUXquBFtRZdv1PfLbTYP71efP6butxyQ0kWpiNJyAgsxscmQg1cqvHY32/UCBzXedTpU2g==} dev: false - /run.env/1.1.0: - resolution: {integrity: sha512-KWs8/mIvCs+LPfoQXp4zPo0gZiQCUNKyV33tN7m6r5jL2ZzT11/A2Y1UACsiSMAB53i7jp26uDN2+AeZuZ7+vw==} - engines: {node: '>=6.1.0', npm: '>=3.8.6'} - hasBin: true - dependencies: - dotenv: 2.0.0 - dev: true - /rw/1.3.3: resolution: {integrity: sha1-P4Yt+pGrdmsUiF700BEkv9oHT7Q=} dev: false @@ -17625,17 +17411,6 @@ packages: postcss: 8.4.5 dev: false - /sanitize-html/2.6.1: - resolution: {integrity: sha512-DzjSz3H5qDntD7s1TcWCSoRPmNR8UmA+y+xZQOvWgjATe2Br9ZW73+vD3Pj6Snrg0RuEuJdXgrKvnYuiuixRkA==} - dependencies: - deepmerge: 4.2.2 - escape-string-regexp: 4.0.0 - htmlparser2: 6.1.0 - is-plain-object: 5.0.0 - parse-srcset: 1.0.2 - postcss: 8.4.5 - dev: false - /sass/1.49.9: resolution: {integrity: sha512-YlYWkkHP9fbwaFRZQRXgDi3mXZShslVmmo+FVK3kHLUELHHEYrCmL1x6IUjC7wLS6VuJSAFXRQS/DxdsC4xL1A==} engines: {node: '>=12.0.0'} @@ -17835,12 +17610,6 @@ packages: engines: {node: '>=6.9'} dev: false - /set-interval-async/2.0.3: - resolution: {integrity: sha512-8jJgvnhQYQc+XHzyKuJ2g4/0h4jPcT/q3x9VURk+AZohRKpcggcueNhPbS7wOXnamgpAn/enbGl4OnWXurVafg==} - dependencies: - '@babel/runtime': 7.5.0 - dev: false - /setimmediate/1.0.5: resolution: {integrity: sha1-KQy7Iy4waULX1+qbg3Mqt4VvgoU=} dev: false @@ -18165,12 +17934,6 @@ packages: engines: {node: '>=6'} dev: false - /split2/3.2.2: - resolution: {integrity: sha512-9NThjpgZnifTkJpzTZ7Eue85S49QwpNhZTq6GRJwObb6jnLFNGB7Qm73V5HewTROPyxD0C29xqmaI68bQtV+hg==} - dependencies: - readable-stream: 3.6.0 - dev: false - /split2/4.1.0: resolution: {integrity: sha512-VBiJxFkxiXRlUIeyMQi8s4hgvKCSjtknJv/LVYbrgALPwf5zSKmEwV9Lst25AkvMDnvxODugjdl6KZgwKM1WYQ==} engines: {node: '>= 10.x'} @@ -18401,10 +18164,6 @@ packages: resolution: {integrity: sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==} engines: {node: '>=8'} - /strnum/1.0.5: - resolution: {integrity: sha512-J8bbNyKKXl5qYcR36TIO8W3mVGVHrmmxsd5PAItGkmyzwJvybiw2IVq5nqd0i4LSNSkB/sx9VHllbfFdr9k1JA==} - dev: true - /style-to-object/0.3.0: resolution: {integrity: 
sha512-CzFnRRXhzWIdItT3OmF8SQfWyahHhjq3HwcMNCNLn+N7klOOqPjMeG/4JSu77D7ypZdGvSzvkrbyeTMizz2VrA==} dependencies: @@ -20303,10 +20062,6 @@ packages: resolution: {integrity: sha512-ICP2e+jsHvAj2E2lIHxa5tjXRlKDJo4IdvPvCXbXQGdzSfmSpNVyIKMvoZHjDY9DP0zV17iI85o90vRFXNccRw==} engines: {node: '>=12'} - /xml/1.0.1: - resolution: {integrity: sha1-eLpyAgApxbyHuKgaPPzXS0ovweU=} - dev: false - /xml2js/0.4.23: resolution: {integrity: sha512-ySPiMjM0+pLDftHgXY4By0uswI3SPKLDw/i3UXbnO8M/p28zqexCUoPmQFrYD+/1BzhGJSs2i1ERWKJAtiLrug==} engines: {node: '>=4.0.0'} diff --git a/src/api/parser/env.local b/src/api/parser/env.local index 813711ac1b..7edaa48c31 100644 --- a/src/api/parser/env.local +++ b/src/api/parser/env.local @@ -38,6 +38,6 @@ PARSER_PORT = 10000 ################################################################################ # Supabase Secrets -# Using staging database -#SUPABASE_URL=http://localhost/v1/supabase -SUPABASE_URL=https://dev.supabase.telescope.cdot.systems/ +SUPABASE_URL=http://localhost/v1/supabase +SERVICE_ROLE_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJzZXJ2aWNlX3JvbGUiLAogICAgImlzcyI6ICJzdXBhYmFzZS1kZW1vIiwKICAgICJpYXQiOiAxNjQxNzY5MjAwLAogICAgImV4cCI6IDE3OTk1MzU2MDAKfQ.DaYlNEoUrrEn2Ig7tqibS-PHK5vgusbcbo7X36XVt4Q + diff --git a/src/backend/data/admin.js b/src/backend/data/admin.js deleted file mode 100644 index 87ec1b1140..0000000000 --- a/src/backend/data/admin.js +++ /dev/null @@ -1,34 +0,0 @@ -const Feed = require('./feed'); -const User = require('./user'); -const hash = require('./hash'); - -// Get space separated list of admin accounts from env -const administrators = process.env.ADMINISTRATORS ? process.env.ADMINISTRATORS.split(' ') : []; - -class Admin extends User { - constructor(name, email, id, nameID, nameIDFormat) { - super(name, email, id, nameID, nameIDFormat); - this.isAdmin = true; - } - - // An admin owns all feeds - owns() { - return true; - } - - // Return every feed for this user - feeds() { - return Feed.all(); - } - - /** - * We define an administrator as someone who is specified in the .env - * ADMINISTRATORS variable list. We support bare email addresses and hashed. - * See env.sample for more details. 
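To make the bare-vs-hashed comparison concrete, here is a small sketch of the check this comment describes (the implementation follows just below); the email value is made up, and the hash helper is the sibling hash.js module deleted later in this diff:

```js
// Sketch: ADMINISTRATORS may hold bare emails or their 10-char hashes.
// An id matches if it equals an entry directly, or equals that entry's hash.
const hash = require('./hash');

process.env.ADMINISTRATORS = 'admin@example.com 6Xoj0UXOW3'; // hypothetical values
const administrators = process.env.ADMINISTRATORS.split(' ');

const isAdmin = (id) => administrators.some((admin) => id === admin || id === hash(admin));

console.log(isAdmin('admin@example.com')); // true: bare email match
console.log(isAdmin(hash('admin@example.com'))); // true: hashed match
```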
- */ - static isAdmin(id) { - return administrators.some((admin) => id === admin || id === hash(admin)); - } -} - -module.exports = Admin; diff --git a/src/backend/data/article-error.js b/src/backend/data/article-error.js deleted file mode 100644 index 8c802899bb..0000000000 --- a/src/backend/data/article-error.js +++ /dev/null @@ -1,13 +0,0 @@ -class ArticleError extends Error { - constructor(message) { - super(message); - this.name = this.constructor.name; - if (typeof Error.captureStackTrace === 'function') { - Error.captureStackTrace(this, this.constructor); - } else { - this.stack = new Error(message).stack; - } - } -} - -module.exports = ArticleError; diff --git a/src/backend/data/feed.js b/src/backend/data/feed.js deleted file mode 100644 index 6f76867244..0000000000 --- a/src/backend/data/feed.js +++ /dev/null @@ -1,210 +0,0 @@ -const normalizeUrl = require('normalize-url'); - -const { - getFeed, - getFeeds, - addFeed, - removeFeed, - setInvalidFeed, - isInvalid, - setDelayedFeed, - isDelayed, - removePost, - getPost, - getPosts, - getFlaggedFeeds, - setFlaggedFeed, - unsetFlaggedFeed, -} = require('../utils/storage'); - -const { deletePost } = require('../utils/indexer'); - -const hash = require('./hash'); - -const urlToId = (url) => hash(normalizeUrl(url)); - -class Feed { - constructor(author, url, user, link, etag, lastModified) { - if (!url) { - throw new Error('missing url for feed'); - } - if (!author) { - throw new Error('missing author for feed'); - } - // Use the feed's normalized url as our unique identifier - this.id = urlToId(url); - this.author = author; - this.url = url; - this.user = user; - this.link = link; - - // We may or may not have these cache values when we create a feed. - this.etag = etag === '' ? null : etag; - this.lastModified = lastModified === '' ? null : lastModified; - } - - /** - * Save the current Feed to the database. - * Returns a Promise. - */ - save() { - return addFeed(this); - } - - /** - * Removes current Feed + associated Posts from the databases. - * Returns a Promise - */ - async delete() { - // Removing feeds and getting all posts, we'll be assigning all the Posts we get back to posts - let [, posts] = await Promise.all([removeFeed(this.id), getPosts(0, 0)]); - - // Filter out all posts which do not contain feed id of feed being removed - posts = await Promise.all(posts.map((id) => getPost(id))); - posts = posts.filter((post) => post.feed === this.id); - - // Remove the post from Redis + ElasticSearch - await Promise.all( - [].concat( - posts.map((post) => removePost(post.id)), - posts.map((post) => deletePost(post.id)) - ) - ); - } - - /** - * Updates current Feed in the database. - * Returns a Promise. - */ - async update() { - await this.delete(); - await this.save(); - } - - /** - * Adds the current Feed to the database with the specified reason - * Returns a Promise - */ - setInvalid(reason) { - return setInvalidFeed(this.id, reason); - } - - /** - * Checks whether the current feed is valid or not - * Returns a Promise. 
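Stepping back to ArticleError, defined earlier in this hunk: a short sketch of how callers were expected to consume it, mirroring articlesToPosts() in processor.js further down this diff:

```js
// Sketch: ArticleError marks an article as safely skippable (missing data),
// while any other error is rethrown as a real failure.
const ArticleError = require('./article-error');

async function tryCreatePost(createFromArticle, article, feed) {
  try {
    await createFromArticle(article, feed);
  } catch (error) {
    if (error instanceof ArticleError) return; // incomplete article: skip quietly
    throw error; // anything else should propagate
  }
}
```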
- */ - isInvalid() { - return isInvalid(this.id); - } - - /** - * Flags a feed in the database, indicating that its processing should be delayed - * @param {Number} seconds - duration in seconds for which processing should wait - * Returns a Promise - */ - setDelayed(seconds) { - return setDelayedFeed(this.id, seconds); - } - - /** - * Checks whether the current feed is delayed or not - * Returns a Promise - */ - async isDelayed() { - return (await isDelayed(this.id)) === 1; - } - - /** - * Flags the feed, preventing posts from the feed to be displayed - * Returns a Promise - */ - flag() { - return setFlaggedFeed(this.id); - } - - /** - * Unflags the feed, allowing posts from the feed to be displayed - * Returns a Promise - */ - unflag() { - return unsetFlaggedFeed(this.id); - } - - /** - * Creates a new Feed object by extracting data from the given feed-like object. - * @param {Object} feedData - an Object containing the necessary fields. - * Returns the newly created Feed's id as a Promise - */ - static async create(feedData) { - const feed = new Feed( - feedData.author, - feedData.url, - feedData.user, - feedData.link, - feedData.etag, - feedData.lastModified - ); - await feed.save(); - return feed.id; - } - - /** - * Returns a Feed from the database using the given id - * @param {String} id - the id of a feed (hashed, normalized url) to get from Redis. - * Returns a Promise - */ - static async byId(id) { - const data = await getFeed(id); - // No feed found using this id - if (!(data && data.id)) { - return null; - } - return new Feed(data.author, data.url, data.user, data.link, data.etag, data.lastModified); - } - - /** - * Returns a Feed from the database using the given url - * @param {String} url - the url of a feed to get from Redis. - * Returns a Promise - */ - static byUrl(url) { - // Use the URL to generate an id - const id = urlToId(url); - return this.byId(id); - } - - /** - * Returns all unflagged feeds - * Returns a Promise - */ - static async all() { - const ids = await getFeeds(); - return Promise.all(ids.map(Feed.byId)); - } - - /** - * Returns all flagged feeds - * Returns a Promise - */ - static async flagged() { - const ids = await getFlaggedFeeds(); - return Promise.all(ids.map(Feed.byId)); - } - - /** - * Sets all stored feeds' lastModified + etag field to null. Used for production - * Returns a Promise - */ - static async clearCache() { - const allFeeds = await this.all(); - await Promise.all( - allFeeds.map((feed) => { - feed.etag = null; - feed.lastModified = null; - return feed.save(); - }) - ); - } -} - -module.exports = Feed; diff --git a/src/backend/data/hash.js b/src/backend/data/hash.js deleted file mode 100644 index 017607ffd7..0000000000 --- a/src/backend/data/hash.js +++ /dev/null @@ -1,18 +0,0 @@ -const crypto = require('crypto'); - -/** - * Hash function used to generate our unique data ids. - * We use sha256 and encode in hex, so it's safe to use - * in URLs. For example: - * - * 6Xoj0UXOW3FNirlSYranli5gY6dDq60hs24EIAcHAEc= - * - * but truncate to only use the first 10 characters - * in order to reduce key sizes in Redis: - * - * 6Xoj0UXOW3 - * - * This is fine for our needs, as we don't have enough - * data to require the entire hash. 
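A quick usage sketch of the helper this comment documents (its one-line implementation closes the file just below), paired with the urlToId wrapper from feed.js above; the sample URLs are hypothetical:

```js
// Sketch: the 10-character feed-id pipeline (sample URLs are made up).
const crypto = require('crypto');
const normalizeUrl = require('normalize-url');

const hash = (input) => crypto.createHash('sha256').update(input).digest('hex').slice(0, 10);
const urlToId = (url) => hash(normalizeUrl(url));

// With normalize-url's defaults, trivially different spellings share one id.
console.log(urlToId('https://example.com/feed/') === urlToId('HTTPS://example.com/feed')); // true
```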
- */ -module.exports = (input) => crypto.createHash('sha256').update(input).digest('hex').slice(0, 10); diff --git a/src/backend/data/stats.js b/src/backend/data/stats.js deleted file mode 100644 index e77e9f168f..0000000000 --- a/src/backend/data/stats.js +++ /dev/null @@ -1,78 +0,0 @@ -const { count } = require('@wordpress/wordcount'); -const startOfWeek = require('date-fns/startOfWeek'); -const startOfMonth = require('date-fns/startOfMonth'); -const startOfYear = require('date-fns/startOfYear'); - -const { getPostsByDate } = require('../utils/storage'); -const Post = require('./post'); - -/** - * Get the total number of words in the text of all posts in the array - * @param {Array} posts the array of post objects - */ -const countWords = (posts) => - posts.reduce((total, post) => total + count(post.text, 'words', {}), 0); - -/** - * Get the total number of unique feeds in the posts in the array - * @param {Array} posts the array of post objects - */ -const countFeeds = (posts) => new Set(posts.map((post) => post.feed.author)).size; - -class Stats { - constructor(startDate, endDate) { - if (!(startDate instanceof Date && endDate instanceof Date)) { - throw new TypeError('startDate and endDate must be Dates'); - } - this.startDate = startDate; - this.endDate = endDate; - } - - /** - * Returns a Promise with counts for posts, authors, and words. - */ - async calculate() { - const ids = await getPostsByDate(this.startDate, this.endDate); - const posts = await Promise.all(ids.map(Post.byId)); - - return { - posts: posts.length, - authors: countFeeds(posts), - words: countWords(posts), - }; - } - - /** - * Creates a new Stats object for today - */ - static today() { - const today = new Date(); - return new Stats(today, today); - } - - /** - * Creates a new Stats object for the first day of this week until today - */ - static thisWeek() { - const today = new Date(); - return new Stats(startOfWeek(today), today); - } - - /** - * Creates a new Stats object for the first day of this month until today - */ - static thisMonth() { - const today = new Date(); - return new Stats(startOfMonth(today), today); - } - - /** - * Creates a new Stats object for Jan 1 of this year until today - */ - static thisYear() { - const today = new Date(); - return new Stats(startOfYear(today), today); - } -} - -module.exports = Stats; diff --git a/src/backend/data/user.js b/src/backend/data/user.js deleted file mode 100644 index 584c91ac37..0000000000 --- a/src/backend/data/user.js +++ /dev/null @@ -1,36 +0,0 @@ -const Feed = require('./feed'); - -class User { - constructor(name, email, id, nameID, nameIDFormat) { - this.name = name; - this.email = email; - this.id = id; - this.nameID = nameID; - this.nameIDFormat = nameIDFormat; - this.isAdmin = false; - } - - toJSON() { - return { - name: this.name, - email: this.email, - id: this.id, - nameID: this.nameID, - nameIDFormat: this.nameIDFormat, - isAdmin: this.isAdmin, - }; - } - - owns(feed) { - return feed.user === this.id; - } - - async feeds() { - const feeds = await Feed.all(); - return feeds.filter((feed) => { - return this.owns(feed); - }); - } -} - -module.exports = User; diff --git a/src/backend/feed/processor.js b/src/backend/feed/processor.js deleted file mode 100644 index 62102fbbc1..0000000000 --- a/src/backend/feed/processor.js +++ /dev/null @@ -1,224 +0,0 @@ -/** - * A processor function to be run concurrently, in its own process, and - * with potentially multiple simultaneous instances, by the feed queue. 
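One mechanism worth highlighting before the implementation: the processor leans on HTTP conditional requests to skip unchanged feeds. A minimal sketch of the pattern, in node-fetch v2 style to match the require below:

```js
// Sketch: conditional HEAD request, as in addHeaders()/getFeedInfo() below.
// A 304 means the cached etag/lastModified is still valid, so skip the download.
const fetch = require('node-fetch');

async function feedHasNewContent(feed) {
  const headers = {};
  if (feed.etag) headers['If-None-Match'] = feed.etag;
  if (feed.lastModified) headers['If-Modified-Since'] = feed.lastModified;

  const response = await fetch(feed.url, { method: 'HEAD', headers });
  return response.status !== 304 && response.ok;
}
```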
- * https://github.com/OptimalBits/bull#separate-processes - */ - -const Parser = require('rss-parser'); -const fetch = require('node-fetch'); - -const { logger } = require('../utils/logger'); -const Post = require('../data/post'); -const Feed = require('../data/feed'); -const ArticleError = require('../data/article-error'); - -// Check for cached ETag and Last-Modified info on the feed. -function hasHeaders(feed) { - return feed.etag || feed.lastModified; -} - -/** - * If we have extra cache/modification info about this feed, add it to the headers. - * @param {Feed} feed - the feed Object, possibly with etag and lastModified info - */ -function addHeaders(options, feed) { - // If there aren't any cached headers for this feed, return options unmodified - if (!hasHeaders(feed)) { - return options; - } - - // Add conditional headers as appropriate for this feed - options.headers = {}; - if (feed.etag) { - // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/If-None-Match - options.headers['If-None-Match'] = feed.etag; - } - if (feed.lastModified) { - // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/If-Modified-Since - options.headers['If-Modified-Since'] = feed.lastModified; - } - - return options; -} - -/** - * Get information about the resource at the other end of this feed's url. - * Specifically, we care about ETag and Last-Modified headers, Content-Type, - * and whether or not we should try to download it. - * See https://developer.mozilla.org/en-US/docs/Web/HTTP/Conditional_requests - */ -async function getFeedInfo(feed) { - const info = { - status: null, - etag: null, - lastModified: null, - link: null, - contentType: null, - shouldDownload: true, - // We do not have user-info at this stage currently, once feeds get added it will include passport.js information here. - user: null, - }; - - let response; - try { - // Do a HEAD request, and see what the current version info for this URL is - response = await fetch(feed.url, addHeaders({ method: 'HEAD' }, feed)); - info.status = `[HTTP ${response.status} - ${response.statusText}]`; - info.contentType = response.headers.get('Content-Type'); - info.link = feed.link; - } catch (error) { - logger.error({ error }, `Unable to fetch HEAD info for feed ${feed.url}`); - throw error; - } - - // We didn't get a 200 after adding the conditional headers, stop now. - if (!(response && response.ok)) { - info.shouldDownload = false; - return info; - } - - // Resource version identifier (e.g., W/"ae1acbdfe7ece35f8651d741fcf94465"), - // unique to the contents of the URL (it if changes, the ETag changes). - // See: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/ETag - const etag = response.headers.get('ETag'); - if (etag) { - info.etag = etag; - } - - // Date and Time the server thinks this resource was last modified - // (e.g., Mon, 16 Dec 2019 14:15:47 GMT). This may or may not be - // the actual date/time it was modified. The ETag is more accurate. See: - // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Last-Modified) - const lastModified = response.headers.get('Last-Modified'); - if (lastModified) { - info.lastModified = lastModified; - } - - return info; -} - -/** - * Convert an array of Articles from the feed parser into Post objects - * and stores them in Redis. - * @param {Array
} articles to process into posts - */ -function articlesToPosts(articles, feed) { - return Promise.all( - articles.map(async (article) => { - try { - await Post.createFromArticle(article, feed); - } catch (error) { - // If this is just some missing data, ignore the post, otherwise throw. - if (error instanceof ArticleError) { - return; - } - throw error; - } - }) - ); -} - -/** - * The processor for the feed queue receives feed jobs, where - * the job to process is an Object with the `id` of the feed. - * We expect the Feed to already exist in the system at this point. - */ -module.exports = async function processor(job) { - const feed = await Feed.byId(job.data.id); - if (!feed) { - throw new Error(`unable to get Feed for id=${job.data.id}`); - } - - let info; - const [invalid, delayed] = await Promise.all([feed.isInvalid(), feed.isDelayed()]); - if (invalid) { - logger.info(`Skipping resource at ${feed.url}. Feed previously marked invalid`); - return; - } - if (delayed) { - logger.info(`Skipping resource at ${feed.url}. Feed previously marked for delayed processing`); - return; - } - try { - info = await getFeedInfo(feed); - // If we get no new version info, there's nothing left to do. - if (!info.shouldDownload) { - // Log some common cases we see, with a general message if none of these - switch (info.status) { - case 304: - logger.info(`${info.status} Feed is up-to-date: ${feed.url}`); - break; - case 404: - logger.warn(`${info.status} Feed not found: ${feed.url}`); - break; - case 410: - logger.warn(`${info.status} Feed no longer available: ${feed.url}`); - break; - case 429: - logger.warn(`${info.status} Feed requested too many times, setting delay: ${feed.url}`); - await feed.setDelayed(process.env.FEED_PROCESSING_DELAY_SEC || 3600); - break; - case 500: - case 599: - logger.warn(`${info.status} Feed server error: ${feed.url}`); - break; - default: - logger.info(`${info.status} Feed not downloaded: ${feed.url}`); - break; - } - - // No posts were processed. - return; - } - - // Download the updated feed contents - logger.info(`${info.status} Feed has new content: ${feed.url}`); - const parser = new Parser( - addHeaders( - { - // ms to wait for a connection to be assumed to have failed - timeout: 20 * 1000, - gzip: true, - customFields: { - item: [ - ['pubDate', 'pubdate'], - ['creator', 'author'], - ['content:encoded', 'contentEncoded'], - ['updated', 'date'], - ['id', 'guid'], - ['media:group', 'mediaGroup'], - ['published', 'pubdate'], - ], - }, - }, - feed - ) - ); - - const articles = await parser.parseURL(feed.url); - - // Transform the list of articles to a list of Post objects - await articlesToPosts(articles.items, feed); - - // Version info for this feed changed, so update the database - feed.etag = feed.etag || info.etag; - feed.lastModified = feed.lastModified || info.lastModified; - // If feed.link is empty or there are blog posts - if (!feed.link && articles.items.length) { - feed.link = articles?.link || null; - } - await feed.save(); - } catch (error) { - // If the feedparser can't parse this, we get a 'Not a feed' error - if (error.message === 'Not a feed') { - logger.info( - `Skipping resource at ${feed.url}, not a valid feed ${ - info.contentType ? 
`(${info.contentType})` : '' - }` - ); - } else { - logger.debug({ error }, `Unable to process feed ${feed.url}`); - throw error; - } - } -}; diff --git a/src/backend/feed/queue.js b/src/backend/feed/queue.js deleted file mode 100644 index 1266dd9375..0000000000 --- a/src/backend/feed/queue.js +++ /dev/null @@ -1,40 +0,0 @@ -const { createBullBoard } = require('@bull-board/api'); -const { BullAdapter } = require('@bull-board/api/bullAdapter'); -const { ExpressAdapter } = require('@bull-board/express'); -require('../lib/config'); -const { logger } = require('../utils/logger'); -const { createQueue } = require('../lib/queue'); - -// Create a Bull Redis Queue -const queue = createQueue('feed-queue'); - -// For visualizing queues using bull board -const serverAdapter = new ExpressAdapter(); -createBullBoard({ queues: [new BullAdapter(queue)], serverAdapter }); - -/** - * Provide a helper for adding a feed with our desired default options. - * The `job` contains an `id`, which refers to a Feed Object `id` already in Redis. - */ -queue.addFeed = async function (job) { - const options = { - // Override the Job ID to use the feed id, so we don't duplicate jobs. - // Bull will not add a job if there already exists a job with the same id. - jobId: job.id, - attempts: process.env.FEED_QUEUE_ATTEMPTS || 5, - backoff: { - type: 'exponential', - delay: process.env.FEED_QUEUE_DELAY_MS || 60 * 1000, - }, - removeOnComplete: true, - removeOnFail: true, - }; - - try { - await queue.add(job, options); - } catch (error) { - logger.error({ error }, `Unable to add job for id=${job.id} to queue`); - } -}; - -module.exports = { feedQueue: queue, serverAdapter }; diff --git a/src/backend/feed/worker.js b/src/backend/feed/worker.js deleted file mode 100644 index 5ceefb9606..0000000000 --- a/src/backend/feed/worker.js +++ /dev/null @@ -1,51 +0,0 @@ -require('../lib/config'); -const { cpus } = require('os'); -const path = require('path'); - -const { feedQueue } = require('./queue'); -const { logger } = require('../utils/logger'); -const { waitOnReady } = require('../utils/indexer'); - -/** - * We determine the number of parallel feed processor functions to run - * based on the value of the environment variable FEED_QUEUE_PARALLEL_WORKERS. - * Possible values are: - * - * *: use the number of available CPUs - * : use the given number, up to the number of available CPUs - * : use 1 by default - */ -function getFeedWorkersCount() { - const { FEED_QUEUE_PARALLEL_WORKERS } = process.env; - const cpuCount = cpus().length; - - if (FEED_QUEUE_PARALLEL_WORKERS === '*') { - return cpuCount; - } - - const count = Number(FEED_QUEUE_PARALLEL_WORKERS) || 1; - return Math.min(count, cpuCount); -} - -exports.start = async function () { - try { - await waitOnReady(); - logger.info('Connected to elasticsearch!'); - const concurrency = getFeedWorkersCount(); - logger.debug( - `Starting ${concurrency} instance${concurrency > 1 ? 's' : ''} of feed processor.` - ); - feedQueue.process(concurrency, path.resolve(__dirname, 'processor.js')); - return feedQueue; - } catch (error) { - /** - * If elasticsearch is not initialized, we throw again to terminate Telescope. 
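Looking back at queue.js in this same stretch of the diff, the jobId override is what prevented duplicate work. A stripped-down sketch of that behaviour; the queue name and feed id are illustrative:

```js
// Sketch: Bull treats a second add() with an existing jobId as a no-op,
// which is how feedQueue.addFeed() above avoided queuing one feed twice.
const { createQueue } = require('../lib/queue');

async function enqueueTwice() {
  const queue = createQueue('feed-queue');
  await queue.add({ id: '6Xoj0UXOW3' }, { jobId: '6Xoj0UXOW3' });
  await queue.add({ id: '6Xoj0UXOW3' }, { jobId: '6Xoj0UXOW3' }); // ignored: job exists
}
```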
- * According to nodejs.org: - * "If it is necessary to terminate the Node.js process due to an error condition, - * throwing an uncaught error and allowing the process to terminate accordingly - * is safer than calling process.exit()" - */ - logger.error(error); - throw error; - } -}; diff --git a/src/backend/index.js b/src/backend/index.js deleted file mode 100644 index 909dae295a..0000000000 --- a/src/backend/index.js +++ /dev/null @@ -1,113 +0,0 @@ -require('./lib/config'); -const { feedQueue } = require('./feed/queue'); -const feedWorker = require('./feed/worker'); -const { logger } = require('./utils/logger'); -const getWikiFeeds = require('./utils/wiki-feed-parser'); -const shutdown = require('./lib/shutdown'); -const Feed = require('./data/feed'); - -// Start the web server -require('./web/server'); - -/** - * Shutting Down Logic for most Server Shutdown Cases - */ -process.on('beforeExit', shutdown('beforeExit')); -process.on('SIGTERM', shutdown('SIGTERM')); -process.on('SIGINT', shutdown('SIGINT')); -process.on('SIGQUIT', shutdown('SIGQUIT')); -process.on('unhandledRejection', shutdown('UNHANDLED REJECTION')); -process.on('uncaughtException', shutdown('UNCAUGHT EXCEPTION')); - -/** - * Adds the feed to the database if necessary, or gets a more complete - * version of the feed if we have better data already. - * @param {Object} feedData - feed data parsed from the wiki feed list. - * Returns Promise, with the most appropriate Feed Object to use. - */ -async function updateFeed(feedData) { - let currentFeed; - - // If we have an existing feed in the database for this URL, prefer that, - // since it might have updated cache info (e.g., etag). - const existingFeed = await Feed.byUrl(feedData.url); - if (existingFeed) { - // We have a version of this feed in the database already, prefer that - currentFeed = existingFeed; - } else { - // First time we're seeing this feed, add it to the database - const id = await Feed.create(feedData); - currentFeed = await Feed.byId(id); - } - - return currentFeed; -} - -/** - * Invalidates a feed - * @param feedData - Object containing feed data - */ -async function invalidateFeed(id, error) { - const feed = await Feed.byId(id); - await feed.setInvalid(error.message); - logger.info(`Invalidating feed ${feed.url} for the following reason: ${error.message}`); -} - -/** - * Process all of these Feed objects into Redis and the feed queue. - * @param {Array} feeds - the parsed feed Objects to be processed. - */ -function processFeeds(feeds) { - return Promise.all( - feeds.map(async (feed) => { - // Save this feed into the database if necessary. - const currentFeed = await updateFeed(feed); - // Add a job to the feed queue to process all of this feed's posts. - await feedQueue.addFeed({ id: currentFeed.id }); - }) - ); -} - -/** - * Download and parse feed author/URL info from the wiki and redis, and process - * these into Feed Objects to be added to the database and feed queue. 
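The scheduling model wired up below is event-driven rather than timer-driven; a minimal sketch of the repeat-forever cycle:

```js
// Sketch: the continuous cycle used by index.js below — enqueue every known
// feed, and when Bull reports 'drained' (all jobs finished), start over.
function startProcessingLoop(feedQueue, loadFeedsIntoQueue) {
  feedQueue.on('drained', loadFeedsIntoQueue); // queue emptied: begin next pass
  loadFeedsIntoQueue(); // kick off the first pass
}
```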
- */ -async function processAllFeeds() { - try { - // Get an Array of Feed objects from the wiki feed list and Redis - const [all, wiki] = await Promise.all([Feed.all(), getWikiFeeds()]); - // Process these feeds into the database and feed queue - await processFeeds([...all, ...wiki]); - } catch (err) { - logger.error({ err }, 'Error queuing feeds'); - } -} - -function loadFeedsIntoQueue() { - logger.info('Loading all feeds into feed queue for processing'); - processAllFeeds().catch((error) => { - logger.error({ error }, 'Unable to enqueue feeds'); - }); -} - -/** - * When the feed queue is drained (all feeds are processed in the queue), - * restart the process again, and repeat forever. - */ -feedQueue.on('drained', loadFeedsIntoQueue); - -/** - * If there is a failure in the queue for a job, set the feed to invalid - * and save to Redis - */ -feedQueue.on('failed', (job, err) => - invalidateFeed(job.data.id, err).catch((error) => - logger.error({ error }, 'Unable to invalidate feed') - ) -); - -/** - * Also load all feeds now and begin processing. - */ -loadFeedsIntoQueue(); -feedWorker.start(); diff --git a/src/backend/lib/config.js b/src/backend/lib/config.js deleted file mode 100644 index 21e0c2b7aa..0000000000 --- a/src/backend/lib/config.js +++ /dev/null @@ -1,17 +0,0 @@ -const path = require('path'); -const dotenv = require('dotenv'); - -// Try and load our default .env file -const result = dotenv.config(); -if (result.error) { - // Try and use the example env instead if we're not in production - if (process.env.NODE_ENV !== 'production') { - const envSamplePath = path.resolve(process.cwd(), path.join('config', 'env.development')); - const secondAttempt = dotenv.config({ path: envSamplePath }); - if (secondAttempt.error) { - console.warn( - `Failed to load .env and ${envSamplePath} env files.\nSee https://github.com/Seneca-CDOT/telescope/blob/master/docs/CONTRIBUTING.md` - ); - } - } -} diff --git a/src/backend/lib/elastic.js b/src/backend/lib/elastic.js deleted file mode 100644 index 65bb8ca07f..0000000000 --- a/src/backend/lib/elastic.js +++ /dev/null @@ -1,56 +0,0 @@ -/* - * NOTE: This has been ported to ~/src/api/search - * Future updates will remove the codebase below - * and point towards `service.docker.localhost` for querying. - */ - -const { ELASTIC_URL, ELASTIC_PORT } = process.env; -const { Client } = require('@elastic/elasticsearch'); -const Mock = require('@elastic/elasticsearch-mock'); -const { logger } = require('../utils/logger'); -const parseUrl = require('../utils/url-parser'); - -function MockClient(options) { - const mock = new Mock(); - options.Connection = mock.getConnection(); - // Mock out various responses we'll need: - mock.add( - { - method: ['PUT', 'POST', 'GET', 'DELETE'], - path: '/posts/post/:post_id', - }, - () => { - return { status: 'ok' }; - } - ); - - const client = new Client(options); - // Provide a fake health check - client.cluster.health = () => Promise.resolve(); - - return client; -} - -// Set MOCK_ELASTIC=1 to mock, MOCK_ELASTIC= to use real elastic -const useMockElastic = process.env.MOCK_ELASTIC; - -// Use either a real Elastic Client or a Mock instance, depending on env setting -const ElasticConstructor = useMockElastic ? 
MockClient : Client; - -function createElasticClient() { - try { - const elasticUrl = parseUrl(ELASTIC_URL, ELASTIC_PORT) || 'http://localhost:9200'; - return new ElasticConstructor({ node: elasticUrl }); - } catch (error) { - const message = `Unable to parse elastic URL "${ELASTIC_URL}" and/or PORT "${ELASTIC_PORT}"`; - logger.error({ error }, message); - throw new Error(message); - } -} - -module.exports = { - // In case callers need to create a new elastic client - createElasticClient, - // Otherwise they can use this shared instance (most should use this) - client: createElasticClient(), -}; diff --git a/src/backend/lib/queue.js b/src/backend/lib/queue.js deleted file mode 100644 index f5f605c017..0000000000 --- a/src/backend/lib/queue.js +++ /dev/null @@ -1,103 +0,0 @@ -const Bull = require('bull'); -const { createRedisClient } = require('./redis'); -const { logger } = require('../utils/logger'); - -/** - * Shared redis connections for pub/sub, see: - * https://github.com/OptimalBits/bull/blob/28a2b9aa444d028fc5192c9bbdc9bb5811e77b08/PATTERNS.md#reusing-redis-connections - */ - -const client = createRedisClient(); -const subscriber = createRedisClient(); - -/** - * Tracks whether an informative message has been logged following a Redis connection failure - */ -let redisConnectionRefusalLogged = false; - -/** - * Create a Queue with the given `name` (String). - * We create a Bull Queue using either a real or mocked - * redis, and manage the creation of the redis connections. - * We also setup logging for this queue name. - */ -function createQueue(name) { - const queue = new Bull(name, { - createClient: (type) => { - switch (type) { - case 'client': - return client; - case 'subscriber': - return subscriber; - default: - return createRedisClient(); - } - }, - }) - .on('error', (error) => { - // An error occurred - if (error.code === 'ECONNREFUSED' && !redisConnectionRefusalLogged) { - logger.error( - '\n\n\t💡 It appears that Redis is not running on your machine.', - '\n\t Please see our documentation for how to install and run Redis:', - '\n\t https://github.com/Seneca-CDOT/telescope/blob/master/docs/CONTRIBUTING.md\n' - ); - redisConnectionRefusalLogged = true; - } else { - logger.error({ error }, `Queue ${name} error`); - } - }) - .on('waiting', (jobID) => { - // A job is waiting for the next idling worker - logger.debug(`Job ${jobID} is waiting.`); - }) - .on('active', (job) => { - // A job has started (use jobPromise.cancel() to abort it) - logger.debug(`Job ${job.id} is active`); - }) - .on('stalled', (job) => { - // A job was marked as stalled. This is useful for debugging - // which workers are crashing or pausing the event loop - logger.debug(`Job ${job.id} has stalled.`); - }) - .on('progress', (job, progress) => { - // A job's progress was updated - logger.debug(`Job ${job.id} progress:`, progress); - }) - .on('completed', (job) => { - // A job has been completed - logger.debug(`Job ${job.id} completed.`); - }) - .on('failed', (job, error) => { - // A job failed with an error - logger.error({ error }, `Job ${job.id} failed.`); - }) - .on('paused', () => { - // The queue was paused - logger.debug(`Queue ${name} paused.`); - }) - .on('resumed', (job) => { - // The queue resumed - logger.debug(`Queue ${name} resumed. ID: ${job.id}`); - }) - .on('cleaned', (jobs, types) => { - // Old jobs were cleaned from the queue - // 'Jobs' is an array of cleaned jobs - // 'Types' is an array of their types - logger.debug(`Queue ${name} was cleaned. 
Jobs: `, jobs, ' Types: ', types);
-    })
-    .on('drained', () => {
-      // The queue was drained
-      // (the last item in the queue was returned by a worker)
-      logger.debug(`Queue ${name} was drained.`);
-    })
-    .on('removed', (job) => {
-      logger.debug(`Job ${job.id} was removed.`);
-    });
-
-  return queue;
-}
-
-module.exports = {
-  createQueue,
-};
diff --git a/src/backend/lib/redis.js b/src/backend/lib/redis.js
deleted file mode 100644
index 02594bfe3c..0000000000
--- a/src/backend/lib/redis.js
+++ /dev/null
@@ -1,39 +0,0 @@
-require('./config');
-const Redis = require('ioredis');
-const MockRedis = require('ioredis-mock');
-const { logger } = require('../utils/logger');
-const parseUrl = require('../utils/url-parser');
-
-// If you need to set the Redis URL, do it in REDIS_URL
-const redisUrl =
-  parseUrl(process.env.REDIS_URL, process.env.REDIS_PORT) || 'redis://127.0.0.1:6379';
-
-// Set MOCK_REDIS=1 to mock, MOCK_REDIS= to use real redis
-const useMockRedis = process.env.MOCK_REDIS;
-
-// RedisConstructor is one of Redis or MockRedis
-const RedisConstructor = useMockRedis ? MockRedis : Redis;
-
-function createRedisClient() {
-  try {
-    const { port, host } = new URL(redisUrl);
-    return new RedisConstructor(port, host, { password: process.env.REDIS_PASSWORD });
-  } catch (error) {
-    const message = `Unable to parse port and host from "${redisUrl}"`;
-    logger.error({ error }, message);
-    throw new Error(message);
-  }
-}
-
-// If using MockRedis, shim info() until https://github.com/stipsan/ioredis-mock/issues/841 ships
-if (useMockRedis && typeof MockRedis.prototype.info !== 'function') {
-  logger.debug('Shimming MockRedis info() method');
-  MockRedis.prototype.info = () => Promise.resolve('redis_version:999.999.999');
-}
-
-module.exports = {
-  // If callers need to create a new redis instance, they'll use the ctor
-  createRedisClient,
-  // Otherwise they can use this shared instance (most should use this)
-  redis: createRedisClient(),
-};
diff --git a/src/backend/lib/shutdown.js b/src/backend/lib/shutdown.js
deleted file mode 100644
index 58fcd8df62..0000000000
--- a/src/backend/lib/shutdown.js
+++ /dev/null
@@ -1,66 +0,0 @@
-const { promisify } = require('util');
-
-const { feedQueue } = require('../feed/queue');
-const { logger } = require('../utils/logger');
-const server = require('../web/server');
-
-let isShuttingDown = false;
-
-async function stopQueue() {
-  try {
-    await feedQueue.close();
-    logger.info('Feed queue shut down.');
-  } catch (error) {
-    logger.debug({ error }, 'Unable to close feed queue gracefully');
-  }
-}
-
-async function stopWebServer() {
-  // Use stoppable's server.stop() instead of httpServer.close()
-  // to force connections to close as well.
See: - // https://github.com/hunterloftis/stoppable - const serverClose = promisify(server.stop.bind(server)); - try { - await serverClose(); - logger.info('Web server shut down.'); - } catch (error) { - logger.debug({ error }, 'Unable to close web server gracefully'); - } -} - -async function cleanShutdown() { - try { - await Promise.all([stopQueue(), stopWebServer()]); - logger.info('Completing shut down.'); - } catch (error) { - logger.debug({ error }, 'Failed to perform clean shutdown'); - } -} - -function shutdown(signal) { - return async (error) => { - if (isShuttingDown) { - return; - } - - logger.info(`Received ${signal}, starting shut down`); - isShuttingDown = true; - - if (error) { - logger.error({ error }); - } - - // If our attempts to shut down cleanly don't work, force it - setTimeout(() => { - logger.error('Could not close connections in time, forcefully shutting down'); - logger.flush(); - process.exit(1); - }, 10000).unref(); - - // Try to shut down cleanly - await cleanShutdown(); - process.exit(error ? 1 : 0); - }; -} - -module.exports = shutdown; diff --git a/src/backend/utils/__mocks__/indexer.js b/src/backend/utils/__mocks__/indexer.js deleted file mode 100644 index 7169d44a7d..0000000000 --- a/src/backend/utils/__mocks__/indexer.js +++ /dev/null @@ -1,64 +0,0 @@ -// Mock storage for our es data -const db = { - results: 0, - values: [], -}; - -const indexPost = ({ text, id, title, published, author }) => { - db.values.push({ - index: 'posts', - type: 'post', - id, - body: { - text, - title, - published, - author, - }, - score: 10, - }); - db.results = db.values.length; - return Promise.resolve(); -}; - -const deletePost = (postId) => { - db.values = db.values.filter((value) => Object.values(value).includes(postId)); - db.results = db.values.length; - return Promise.resolve(); -}; - -const createFieldsFromFilter = (filter) => { - switch (filter) { - case 'author': - return ['author']; - case 'post': - default: - return ['text', 'title']; - } -}; - -const search = (textToSearch = '', filter = 'post') => { - const fields = createFieldsFromFilter(filter); - let filtered = db.values; - fields.forEach((field) => { - filtered = filtered.filter((value) => value.body[field].includes(textToSearch)); - }); - return Promise.resolve({ - results: filtered.length, - values: filtered.map((value) => ({ id: value.id })), - }); -}; - -const checkConnection = () => Promise.resolve(); - -const waitOnReady = () => Promise.resolve(); - -module.exports = { - // Expose the internal db for testing - db, - indexPost, - deletePost, - checkConnection, - search, - waitOnReady, -}; diff --git a/src/backend/utils/html/dom.js b/src/backend/utils/html/dom.js deleted file mode 100644 index 6f4de96749..0000000000 --- a/src/backend/utils/html/dom.js +++ /dev/null @@ -1,23 +0,0 @@ -const { TextEncoder, TextDecoder } = require('util'); - -global.TextEncoder = TextEncoder; -global.TextDecoder = TextDecoder; - -const jsdom = require('jsdom'); - -const { JSDOM } = jsdom; - -/** - * Parse and return the String `html` into a JSDOM instance, or return `null`. 
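- *
- * Usage sketch (illustrative only; toDOM is defined just below):
- *   const dom = toDOM('<p>Hello</p>');
- *   const text = dom && dom.window.document.body.textContent; // 'Hello'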
- */ -module.exports = function toDOM(html) { - if (typeof html !== 'string') { - return null; - } - - try { - return new JSDOM(html); - } catch (err) { - return null; - } -}; diff --git a/src/backend/utils/html/fix-iframe-width.js b/src/backend/utils/html/fix-iframe-width.js deleted file mode 100644 index 516598e9a7..0000000000 --- a/src/backend/utils/html/fix-iframe-width.js +++ /dev/null @@ -1,25 +0,0 @@ -/** - * Take an '; - const result = fixIFrameWidth(original); - expect(result).toEqual('
'); - }); - - test('Multiple '; - const result = fixIFrameWidth(original); - expect(result).toEqual( - '
' - ); - }); -}); diff --git a/test/fixtures.js b/test/fixtures.js deleted file mode 100644 index 19a06715c7..0000000000 --- a/test/fixtures.js +++ /dev/null @@ -1,182 +0,0 @@ -const nock = require('nock'); -const fs = require('fs'); -const path = require('path'); - -/** - * This is a fixture module for telescope tests which contains measures to - * maintain a reproducible environment for system testing - */ - -const getAtomUri = () => 'https://test321.blogspot.com/feeds/posts/default/-/open-source'; -const getRssUri = () => 'https://test321.blogspot.com/feeds/posts/default/-/open-source?alt=rss'; -const getHtmlUri = () => 'https://test321.blogspot.com/blog'; -// Remove leading protocol from a URI -const stripProtocol = (uri) => uri.replace(/^https?:\/\//, ''); - -// Use blog.humphd.org as a more realistic test case -const getRealWorldRssUri = () => 'https://blog.humphd.org/tag/seneca/rss/'; -const getRealWorldRssBody = () => - fs.readFileSync(path.join(__dirname, './test_files/blog.humphd.org.rss')); - -// Use David Humphrey's channel for a realistic test case of YouTube channel -const getRealWorldYouTubeFeedUri = () => - 'https://www.youtube.com/feeds/videos.xml?channel_id=UCqaMbMDf01BLttof1lHAo2A'; -const getRealWorldYouTubeFeedBody = () => - fs.readFileSync(path.join(__dirname, './test_files/humphd-yt-channel.xml')); - -// Portion of https://www.feedforall.com/sample.xml -const getValidFeedBody = () => - ` - - - - FeedForAll Sample Feed - RSS is a fascinating technology. The uses for RSS are expanding daily. Take a closer look at how various industries are using the benefits of RSS in their businesses. - http://www.feedforall.com/industry-solutions.htm - Computers/Software/Internet/Site Management/Content Management - Copyright 2004 NotePage, Inc. - http://blogs.law.harvard.edu/tech/rss - en-us - Tue, 19 Oct 2004 13:39:14 -0400 - marketing@feedforall.com - Tue, 19 Oct 2004 13:38:55 -0400 - webmaster@feedforall.com - FeedForAll Beta1 (0.0.1.8) - - http://www.feedforall.com/ffalogo48x48.gif - FeedForAll Sample Feed - http://www.feedforall.com/industry-solutions.htm - FeedForAll Sample Feed - 48 - 48 - - - RSS Solutions for Restaurants - <b>FeedForAll </b>helps Restaurant's communicate with customers. Let your customers know the latest specials or events.<br> - <br> - RSS feed uses include:<br> - <i><font color="#FF0000">Daily Specials <br> - Entertainment <br> - Calendar of Events </i></font> - http://www.feedforall.com/restaurant.htm - Computers/Software/Internet/Site Management/Content Management - http://www.feedforall.com/forum - Tue, 19 Oct 2004 11:09:11 -0400 - - - RSS Solutions for Schools and Colleges - FeedForAll helps Educational Institutions communicate with students about school wide activities, events, and schedules.<br> - <br> - RSS feed uses include:<br> - <i><font color="#0000FF">Homework Assignments <br> - School Cancellations <br> - Calendar of Events <br> - Sports Scores <br> - Clubs/Organization Meetings <br> - Lunches Menus </i></font> - http://www.feedforall.com/schools.htm - Computers/Software/Internet/Site Management/Content Management - http://www.feedforall.com/forum - Tue, 19 Oct 2004 11:09:09 -0400 - - - -`; - -const getEmptyFeedBody = () => - ` - - - - - `; - -const getValidHtmlBody = () => - ` - - - - - HTML Page - - -
-    <p>HTML, NOT XML</p>
-  </body>
-  </html>
-  `;
-
-const getInvalidDescription = () =>
-  `
-  <div>
-    <br />
-  </div>
-  <div>
-    <br />
-  </div>
-  <p>   </p>
-  `;
-
-/**
- * Generic network nock request, used below to define all our mock requests.
- *
- * @param {String} uri - the full, absolute URL to use for this mock network request
- * @param {String} body - the body to return
- * @param {Number} httpResponseCode - the HTTP result code
- * @param {String} mimeType - the mime type to use for the response
- * @param {Object} headers - extra response headers to include
- */
-function nockResponse(uri, body, httpResponseCode, mimeType, headers) {
-  const { protocol, host, pathname, search } = new URL(uri);
-  nock(`${protocol}//${host}`)
-    .get(`${pathname}${search || ''}`)
-    .reply(httpResponseCode, body, {
-      'Content-Type': mimeType,
-      ...headers,
-    });
-}
-
-exports.getAtomUri = getAtomUri;
-exports.getRssUri = getRssUri;
-exports.getHtmlUri = getHtmlUri;
-exports.getRealWorldRssUri = getRealWorldRssUri;
-exports.getRealWorldYouTubeFeedUri = getRealWorldYouTubeFeedUri;
-exports.stripProtocol = stripProtocol;
-exports.getInvalidDescription = getInvalidDescription;
-
-exports.getValidFeedBody = getValidFeedBody;
-exports.getEmptyFeedBody = getEmptyFeedBody;
-exports.getValidHtmlBody = getValidHtmlBody;
-
-exports.nockValidAtomResponse = function (headers = {}) {
-  nockResponse(getAtomUri(), getValidFeedBody(), 200, 'application/rss+xml', headers);
-};
-
-exports.nockValidRssResponse = function (headers = {}) {
-  nockResponse(getRssUri(), getValidFeedBody(), 200, 'application/rss+xml', headers);
-};
-
-exports.nockInvalidRssResponse = function (headers = {}) {
-  nockResponse(getRssUri(), getEmptyFeedBody(), 200, 'application/rss+xml', headers);
-};
-
-exports.nockValidHtmlResponse = function (headers = {}) {
-  nockResponse(getHtmlUri(), getValidHtmlBody(), 200, 'text/html', headers);
-};
-
-exports.nock404Response = function (headers = {}) {
-  nockResponse(getHtmlUri(), 'Not Found', 404, 'text/html', headers);
-};
-
-exports.nockRealWorldRssResponse = function (headers = {}) {
-  nockResponse(getRealWorldRssUri(), getRealWorldRssBody(), 200, 'application/rss+xml', headers);
-};
-
-exports.nockRealWorldYouTubeFeedResponse = function (headers = {}) {
-  nockResponse(
-    getRealWorldYouTubeFeedUri(),
-    getRealWorldYouTubeFeedBody(),
-    200,
-    'application/rss+xml',
-    headers
-  );
-};
-
-exports.createMockJobObjectFromFeedId = (id) => ({ data: { id } });
diff --git a/test/hash.test.js b/test/hash.test.js
deleted file mode 100644
index dc3366ed2d..0000000000
--- a/test/hash.test.js
+++ /dev/null
@@ -1,15 +0,0 @@
-const hash = require('../src/backend/data/hash');
-
-describe('hash function tests', () => {
-  it('should return a 10 character hash', () => {
-    expect(hash('input').length).toBe(10);
-  });
-
-  it('should properly hash a string', () => {
-    expect(hash('input')).toBe('c96c6d5be8');
-  });
-
-  it('should return a different string if anything changes', () => {
-    expect(hash('input2')).toBe('124d8541ff');
-  });
-});
diff --git a/test/jest.config.js b/test/jest.config.js
deleted file mode 100644
index f438d19877..0000000000
--- a/test/jest.config.js
+++ /dev/null
@@ -1,9 +0,0 @@
-const baseConfig = require('../jest.config.base');
-
-module.exports = {
-  ...baseConfig,
-  rootDir: '../',
-  setupFiles: ['<rootDir>/test/jest.setup.js'],
-  testMatch: ['<rootDir>/test/**/*.test.js'],
-  collectCoverageFrom: ['<rootDir>/src/backend/**/*.js'],
-};
diff --git a/test/jest.setup.js b/test/jest.setup.js
deleted file mode 100644
index 2cd680f852..0000000000
--- a/test/jest.setup.js
+++ /dev/null
@@ -1,11 +0,0 @@
-const fetch = require('jest-fetch-mock');
-
-// Mock fetch for the Telescope 1.0 back-end tests
-jest.setMock('node-fetch', fetch);
-
-//
Config variables for testing -process.env = { - ...process.env, - NODE_ENV: 'test', - FEED_URL_INTERVAL_MS: '200', -}; diff --git a/test/lazy-load.test.js b/test/lazy-load.test.js deleted file mode 100644 index 3225a88c2d..0000000000 --- a/test/lazy-load.test.js +++ /dev/null @@ -1,79 +0,0 @@ -/** - * @jest-environment jsdom - */ - -const toDOM = require('../src/backend/utils/html/dom'); -const lazy = require('../src/backend/utils/html/lazy-load'); - -function lazyLoad(html) { - const dom = toDOM(html); - lazy(dom); - return dom.window.document.body.innerHTML; -} - -/** - * lazyLoad() will update and '; - const result = lazyLoad(original); - expect(result).toEqual(''); - }); - - test('An '; - const result = lazyLoad(original); - expect(result).toEqual(''); - }); - - test('Multiple '; - const result = lazyLoad(original); - expect(result).toEqual(original); - }); - }); -}); diff --git a/test/lib/authentication.js b/test/lib/authentication.js deleted file mode 100644 index 22e6e7bf12..0000000000 --- a/test/lib/authentication.js +++ /dev/null @@ -1,22 +0,0 @@ -// NOTE: you must mock the authentication provider where you require this: -// jest.mock('../src/backend/web/authentication'); - -const { init } = require('../../src/backend/web/authentication'); - -const defaultName = 'user1'; -const defaultEmail = 'user1@example.com'; - -// Login as a regular user -module.exports.login = function (name, email, isAdmin = false) { - return init(name || defaultName, email || defaultEmail, isAdmin); -}; - -// Login as an Admin -module.exports.loginAdmin = function (name, email) { - return module.exports.login(name, email, true); -}; - -// Logout -module.exports.logout = function () { - init(); -}; diff --git a/test/logger.test.js b/test/logger.test.js deleted file mode 100644 index 8bb2964665..0000000000 --- a/test/logger.test.js +++ /dev/null @@ -1,14 +0,0 @@ -const { logger } = require('../src/backend/utils/logger'); - -// This test helps determine whether the type of -// logger methods are functions - -test('logger.methods to be functions', () => { - expect(typeof logger.debug).toBe('function'); - expect(typeof logger.info).toBe('function'); - expect(typeof logger.child).toBe('function'); - expect(typeof logger.error).toBe('function'); - expect(typeof logger.trace).toBe('function'); - expect(typeof logger.warn).toBe('function'); - expect(typeof logger.fatal).toBe('function'); -}); diff --git a/test/modify-pre.test.js b/test/modify-pre.test.js deleted file mode 100644 index 7294caa9db..0000000000 --- a/test/modify-pre.test.js +++ /dev/null @@ -1,36 +0,0 @@ -const toDOM = require('../src/backend/utils/html/dom'); -const fixEmptyPre = require('../src/backend/utils/html/modify-pre'); - -function fixEmpties(htmlData) { - const dom = toDOM(htmlData); - fixEmptyPre(dom); - return dom.window.document.body.innerHTML; -} - -describe('modify pre tag without element tests', () => { - test('html body without pre tags should not be changed', () => { - const og = '
<div> <p>Hello World</p> </div>';
-    const res = fixEmpties(og);
-    expect(res).toEqual(og);
-  });
-
-  test('pre tags with inner elements should not be changed', () => {
-    const og = '<pre><code>console.log("Hello World")</code></pre>';
-    const res = fixEmpties(og);
-    expect(res).toEqual(og);
-  });
-
-  test('pre tags without inner elements should be fixed', () => {
-    const og = '<pre>console.log("Hello World")</pre>';
-    const res = fixEmpties(og);
-    const fix = '<pre><code>console.log("Hello World")</code></pre>';
-    expect(res).toEqual(fix);
-  });
-
-  test('pre tag with child <br> elements should be fixed', () => {
-    const og = '<pre> <br>console.log("Hello World")<br></pre>';
-    const res = fixEmpties(og);
-    const fix = '<pre><code>\nconsole.log("Hello World")\n</code></pre>
'; - expect(res).toEqual(fix); - }); -}); diff --git a/test/post.test.js b/test/post.test.js deleted file mode 100644 index 4ad1099ce0..0000000000 --- a/test/post.test.js +++ /dev/null @@ -1,317 +0,0 @@ -const Parser = require('rss-parser'); - -const parse = new Parser({ - customFields: { - item: [ - ['pubDate', 'pubdate'], - ['creator', 'author'], - ['content:encoded', 'contentEncoded'], - ['updated', 'date'], - ['id', 'guid'], - ['media:group', 'mediaGroup'], - ['published', 'pubdate'], - ], - }, -}); - -const { - nockRealWorldRssResponse, - nockRealWorldYouTubeFeedResponse, - getRealWorldYouTubeFeedUri, - getRealWorldRssUri, - getInvalidDescription, -} = require('./fixtures'); -const Post = require('../src/backend/data/post'); -const Feed = require('../src/backend/data/feed'); -const hash = require('../src/backend/data/hash'); - -jest.mock('../src/backend/utils/indexer'); - -describe('Post data class tests', () => { - let feed; - - const data = { - title: 'Post Title', - html: '
<div> <p>post text</p> </div>
', - published: new Date('Thu, 20 Nov 2014 18:59:18 UTC'), - updated: new Date('Thu, 20 Nov 2014 18:59:18 UTC'), - url: 'https://user.post.com/?post-id=123', - guid: 'https://user.post.com/?post-id=123&guid', - id: hash('https://user.post.com/?post-id=123&guid'), - type: 'blogpost', - }; - - beforeAll(async () => { - const id = await Feed.create({ - author: 'Feed Author', - url: 'http://feed-url.com/', - }); - feed = await Feed.byId(id); - - // Set the feed property for our data to this feed - data.feed = feed; - }); - - const text = 'post text'; - - const createPost = () => - new Post(data.title, data.html, data.published, data.updated, data.url, data.guid, feed); - - test('Post should be a function', () => { - expect(typeof Post).toBe('function'); - }); - - test('Post constructor should populate all expected properties', () => { - const post = createPost(); - expect(post.id).toEqual(data.id); - expect(post.title).toEqual(data.title); - expect(post.html).toEqual(data.html); - expect(post.published).toEqual(data.published); - expect(post.updated).toEqual(data.updated); - expect(post.guid).toEqual(data.guid); - expect(post.feed).toEqual(feed); - }); - - test('Post constructor should work with with published and updated as Strings', () => { - const post1 = createPost(); - const post2 = new Post( - data.title, - data.html, - 'Thu, 20 Nov 2014 18:59:18 UTC', - 'Thu, 20 Nov 2014 18:59:18 UTC', - data.url, - data.guid, - feed - ); - - expect(post1).toEqual(post2); - }); - - test('Post constructor should work with with published and updated as Dates', () => { - const post1 = createPost(); - const post2 = new Post( - data.title, - data.html, - new Date('Thu, 20 Nov 2014 18:59:18 UTC'), - new Date('Thu, 20 Nov 2014 18:59:18 UTC'), - data.url, - data.guid, - feed - ); - - expect(post1).toEqual(post2); - }); - - test('Post constructor should throw if feed is missing or not a Feed instance', () => { - const createPostWithFeed = (f) => - new Post(data.title, data.html, data.published, data.updated, data.url, data.guid, f); - - expect(() => createPostWithFeed(null)).toThrow(); - expect(() => createPostWithFeed(undefined)).toThrow(); - expect(() => createPostWithFeed('feedid')).toThrow(); - expect(() => createPostWithFeed(1234)).toThrow(); - expect(() => createPostWithFeed({})).toThrow(); - expect(() => createPostWithFeed(feed)).not.toThrow(); - }); - - test('Post constructor should throw if a string or date is not passed', () => { - const createPostWithDates = (datePublished, dateUpdated) => - new Post(data.title, data.html, datePublished, dateUpdated, data.url, data.guid, feed); - expect(() => - createPostWithDates( - new Date('Thu, 20 Nov 2014 18:59:18 UTC'), - new Date('Fri, 28 Nov 2014 18:59:18 UTC') - ) - ).not.toThrow(); - expect(() => - createPostWithDates(new Date('Thu, 20 Nov 2014 18:59:18 UTC'), 'string') - ).not.toThrow(); - expect(() => - createPostWithDates('string', new Date('Thu, 20 Nov 2014 18:59:18 UTC')) - ).not.toThrow(); - expect(() => - createPostWithDates('Thu, 45 Nov 2014 18:59:18 UTC', 'Thu, 35 Dec 2014 18:59:18 UTC') - ).not.toThrow(); - expect(() => createPostWithDates(10, 20)).toThrow(); - }); - - test('Post.create() should be able to parse an Object into a Post', async () => { - const id = await Post.create(data); - const expectedId = 'a371654c75'; - expect(id).toEqual(expectedId); - const post = await Post.byId(expectedId); - expect(post).toEqual(createPost()); - expect(post.feed instanceof Feed).toBe(true); - }); - - test('Posts should have a (dynamic) text property', async 
() => { - const id = await Post.create(data); - const post = await Post.byId(id); - expect(post.text).toEqual(text); - }); - - test('Post.create() should work with missing fields', async () => { - const missingData = { ...data, updated: null, feed }; - - // Make sure that updated was turned into a date - const id = await Post.create(missingData); - const parsed = await Post.byId(id); - expect(parsed.updated).toBeDefined(); - expect(typeof parsed.updated.getTime).toEqual('function'); - }); - - test('Post.save() and Post.byId() should both work as expected', async () => { - const id = await Post.create(data); - const post = await Post.byId(id); - expect(post).toEqual(data); - - // Modify the post and save - post.title = 'updated title'; - await post.save(); - - // Get it back again - const post2 = await Post.byId(id); - const data2 = { ...data, title: 'updated title' }; - expect(post2).toEqual(data2); - }); - - test('Post.byId() with invalid id should return null', async () => { - const post = createPost(); - await post.save(); - const result = await Post.byId('invalid id'); - expect(result).toBe(null); - }); - - describe('Post.createFromArticle() with blog feeds tests', () => { - let articles; - beforeEach(async () => { - nockRealWorldRssResponse(); - articles = await parse.parseURL(getRealWorldRssUri()); - - expect(Array.isArray(articles.items)).toBe(true); - expect(articles.items.length).toBe(15); - }); - - test('should throw if passed no article', async () => { - let err; - try { - await Post.createFromArticle(null, feed); - } catch (error) { - err = error; - } - expect(err).toBeDefined(); - }); - - test('should throw if passed nothing', async () => { - let err; - try { - await Post.createFromArticle(); - } catch (error) { - err = error; - } - expect(err).toBeDefined(); - }); - - test('should work with real world RSS', async () => { - const article = articles.items[0]; - const id = await Post.createFromArticle(article, feed); - const post = await Post.byId(id); - - expect(post instanceof Post).toBe(true); - expect(post.title).toEqual('Teaching Open Source, Fall 2019'); - expect( - post.html.startsWith(`

Today I've completed another semester of teaching open source`) - ).toBe(true); - expect(post.text.startsWith("Today I've completed another semester of teaching open source")); - expect(post.published).toEqual(new Date('Mon, 16 Dec 2019 20:37:14 GMT')); - expect(post.updated).toEqual(new Date('Mon, 16 Dec 2019 20:37:14 GMT')); - expect(post.url).toEqual('https://blog.humphd.org/open-source-fall-2019/'); - expect(post.guid).toEqual('5df7bbd924511e03496b4734'); - expect(post.id).toEqual(hash(post.guid)); - expect(post.feed).toEqual(feed); - }); - - test('when missing description should throw', async () => { - const article = articles.items[0]; - delete article.content; - delete article.contentEncoded; - delete article.contentSnippet; - await expect(Post.createFromArticle(article, feed)).rejects.toThrow(); - }); - - test('Post.createFromArticle() with missing pubdate should throw', async () => { - const article = articles.items[0]; - delete article.pubdate; - await expect(Post.createFromArticle(article, feed)).rejects.toThrow(); - }); - - test('Post.createFromArticle() with missing date should not throw', async () => { - const article = articles.items[0]; - delete article.date; - const id = await Post.createFromArticle(article, feed); - const post = await Post.byId(id); - expect(post.published).toBeDefined(); - expect(typeof post.published.getTime).toEqual('function'); - }); - - test('Post.createFromArticle() with missing link should throw', async () => { - const article = articles.items[0]; - delete article.link; - - let err; - try { - await Post.createFromArticle(article, feed); - } catch (error) { - err = error; - } - expect(err).toBeDefined(); - }); - - test('Post.createFromArticle() with missing guid should throw', async () => { - const article = articles.items[0]; - delete article.guid; - await expect(Post.createFromArticle(article, feed)).rejects.toThrow(); - }); - - test('Post.createFromArticle() with missing title should use Untitled', async () => { - const article = articles.items[0]; - delete article.title; - const id = await Post.createFromArticle(article, feed); - const post = await Post.byId(id); - expect(post.title).toEqual('Untitled'); - }); - - test('Post.createFromArticle() with whitespace only in description should throw', async () => { - const article = articles.items[0]; - const invalidDescription = getInvalidDescription(); - article.content = invalidDescription; - article.contentEncoded = invalidDescription; - article.contentSnippet = invalidDescription; - await expect(Post.createFromArticle(article, feed)).rejects.toThrow(); - }); - }); - - describe('Post.createFromArticle() with youtube feeds tests', () => { - let articles; - beforeEach(async () => { - nockRealWorldYouTubeFeedResponse(); - articles = await parse.parseURL(getRealWorldYouTubeFeedUri()); - - expect(Array.isArray(articles.items)).toBe(true); - expect(articles.items.length).toBe(15); - }); - - test('Post.createFromArticle() should create Post with YouTube video article, with linkified content', async () => { - const article = articles.items[0]; - const id = await Post.createFromArticle(article, feed); - const videoPost = await Post.byId(id); - - expect(videoPost.title).toBe('DPS909 OSD600 Week 03 - Fixing a Bug in the Azure JS SDK'); - expect(videoPost.url).toBe('https://www.youtube.com/watch?v=mNuHA7vH6Wc'); - expect(videoPost.type).toBe('video'); - expect(videoPost.html).toBe( - 'Walkthrough and discussion of fixing a bug in https://github.com/Azure/azure-sdk-for-js. 
Issue at https://github.com/Azure/azure-sdk-for-js/issues/15772. PR at https://github.com/Azure/azure-sdk-for-js/pull/17820.'
-      );
-    });
-  });
-});
diff --git a/test/process-html.test.js b/test/process-html.test.js
deleted file mode 100644
index c24c812c1d..0000000000
--- a/test/process-html.test.js
+++ /dev/null
@@ -1,51 +0,0 @@
-const process = require('../src/backend/utils/html/index');
-
-describe('Process HTML', () => {
-  test('Unknown tags within code tag should stay the same', () => {
-    const content = `

Hello<Hello> <HelloWorld/> </HelloWorld2>
`; - const expectedData = `
Hello<Hello> <HelloWorld/> </HelloWorld2>
`; - const data = process(content); - expect(data).toBe(expectedData); - }); - - test('https://github.com/Seneca-CDOT/telescope/issues/2313', () => { - const data = process( - `
const html = data.replace (/^## (.*$)/gim, '<h2>$1</h2>')
` - ); - const expectedData = `
const html = data.replace (/^## (.*$)/gim, '<h2>$1</h2>')
`; - expect(data).toBe(expectedData); - }); - - test('https://github.com/Seneca-CDOT/telescope/issues/2220', () => { - const data = process( - `
<img src={slackLogo} alt="logo"/>
` - ); - const expectedData = - '
<img src={slackLogo} alt="logo"/>
'; - expect(data).toBe(expectedData); - }); - - test('Double escape character stay the same', () => { - const data = process(`&& &<`); - const expectedData = `&& &<`; - expect(data).toBe(expectedData); - }); - - test('Escape character stay the same', () => { - const data = process(`&lt; ;&`); - const expectedData = `&lt; ;&`; - expect(data).toBe(expectedData); - }); - - test('https://github.com/Seneca-CDOT/telescope/issues/1091', () => { - const data = process(`['12', ...].map((number) =>; - twilio.messages.create({ - body: '...' - `); - const expectedData = `['12', ...].map((number) =>; - twilio.messages.create({ - body: '...' - `; - expect(data).toBe(expectedData); - }); -}); diff --git a/test/query.test.js b/test/query.test.js deleted file mode 100644 index 80de32b022..0000000000 --- a/test/query.test.js +++ /dev/null @@ -1,118 +0,0 @@ -const request = require('supertest'); -const app = require('../src/backend/web/app'); - -jest.mock('../src/backend/utils/indexer'); - -describe('Testing query route', () => { - test('Testing with valid length of characters for text', async () => { - const searchTerm = encodeURIComponent('I Love Telescope'); - const res = await request(app).get(`/query?text=${searchTerm}&filter=post`); - expect(res.status).toBe(200); - }); - - test('Testing with invalid length of characters for text', async () => { - // searchTerm is 257 chars long - const searchTerm = encodeURIComponent( - '8l4XOYWZ3SA9aevIozTcEAng3GOSCAiiDARThEkAFn2F2YtBexA3lcg1O38SGSHILQrrNYReKWOC6RM4ZQQIGqZoLSOLlbbYqlfSkIDM83aeGDYW7KU8OSLbIXUIWIF4TINwrjxi453biwyjgYsJeqFx9ORd0EIw3dMwGPWhoMbvTIxUWXV032qgPRmohLbTf8xnMyttPjIOk3rHBXpukWSnkZiKyBMsUniZZnxYPw7yIhfoaS77jIPRUuiQufDdO' - ); - const res = await request(app).get(`/query?text=${searchTerm}&filter=post`); - expect(searchTerm.length).toBeGreaterThan(256); - expect(res.status).toBe(400); - }); - - test('Testing with missing text param', async () => { - const res = await request(app).get('/query?filter=post'); - expect(res.status).toBe(400); - }); - - test('Testing with missing filter param', async () => { - const searchTerm = encodeURIComponent('I Love Telescope'); - const res = await request(app).get(`/query?text=${searchTerm}`); - expect(res.status).toBe(400); - }); - - test('Testing with invalid filter', async () => { - const searchTerm = encodeURIComponent('I Love Telescope'); - const res = await request(app).get(`/query?text=${searchTerm}&filter=test`); - expect(res.status).toBe(400); - }); - - test('Testing with valid post filter', async () => { - const searchTerm = encodeURIComponent('I Love Telescope'); - const res = await request(app).get(`/query?text=${searchTerm}&filter=post`); - expect(res.status).toBe(200); - }); - - test('Testing with valid author filter', async () => { - const searchTerm = encodeURIComponent('I Love Telescope'); - const res = await request(app).get(`/query?text=${searchTerm}&filter=author`); - expect(res.status).toBe(200); - }); - - test('Testing with empty param values', async () => { - const res = await request(app).get(`/query?text=&filter=`); - expect(res.status).toBe(400); - }); - - test('Testing with empty page value', async () => { - const searchTerm = encodeURIComponent('I Love Telescope'); - const res = await request(app).get(`/query?text=${searchTerm}&filter=post&page=`); - expect(res.status).toBe(400); - }); - - test('Testing with invalid page value', async () => { - const searchTerm = encodeURIComponent('I Love Telescope'); - const res = await 
request(app).get(`/query?text=${searchTerm}&filter=post&page=invalid`); - expect(res.status).toBe(400); - }); - - test('Testing with page above range', async () => { - const searchTerm = encodeURIComponent('I Love Telescope'); - const res = await request(app).get(`/query?text=${searchTerm}&filter=post&page=1000`); - expect(res.status).toBe(400); - }); - - test('Testing with page below range', async () => { - const searchTerm = encodeURIComponent('I Love Telescope'); - const res = await request(app).get(`/query?text=${searchTerm}&filter=post&page=-1`); - expect(res.status).toBe(400); - }); - - test('Testing with valid page in range', async () => { - const searchTerm = encodeURIComponent('I Love Telescope'); - const res = await request(app).get(`/query?text=${searchTerm}&filter=post&page=1`); - expect(res.status).toBe(200); - }); - - test('Testing with empty per page value', async () => { - const searchTerm = encodeURIComponent('I Love Telescope'); - const res = await request(app).get(`/query?text=${searchTerm}&filter=post&page=0&perPage=`); - expect(res.status).toBe(400); - }); - - test('Testing with invalid per page value', async () => { - const searchTerm = encodeURIComponent('I Love Telescope'); - const res = await request(app).get( - `/query?text=${searchTerm}&filter=post&page=0&perPage=invalid` - ); - expect(res.status).toBe(400); - }); - - test('Testing with per page above range ', async () => { - const searchTerm = encodeURIComponent('I Love Telescope'); - const res = await request(app).get(`/query?text=${searchTerm}&filter=post&page=0&perPage=11`); - expect(res.status).toBe(400); - }); - - test('Testing with per page below range ', async () => { - const searchTerm = encodeURIComponent('I Love Telescope'); - const res = await request(app).get(`/query?text=${searchTerm}&filter=post&page=0&perPage=0`); - expect(res.status).toBe(400); - }); - - test('Testing with valid per page in range ', async () => { - const searchTerm = encodeURIComponent('I Love Telescope'); - const res = await request(app).get(`/query?text=${searchTerm}&filter=post&page=0&perPage=3`); - expect(res.status).toBe(200); - }); -}); diff --git a/test/remove-empty-anchor.test.js b/test/remove-empty-anchor.test.js deleted file mode 100644 index ee77fb537c..0000000000 --- a/test/remove-empty-anchor.test.js +++ /dev/null @@ -1,102 +0,0 @@ -const toDom = require('../src/backend/utils/html/dom'); -const removeEmptyAnchor = require('../src/backend/utils/html/remove-empty-anchor'); - -function removeNoContentAnchor(html) { - const dom = toDom(html); - removeEmptyAnchor(dom); - return dom.window.document.body.innerHTML; -} - -/** - * removeNoContentAnchor() will remove all anchors that have no content - * but keep those that include media tags - */ -describe('Remove no content anchor tags', () => { - // Test to see if get removed - test('Should remove when parent is empty eg.
<div><a></a></div>', () => {
-    const HTMLBefore = '<div><a></a></div>';
-    const HTMLAfter = removeNoContentAnchor(HTMLBefore);
-    const HTMLExpected = '<div></div>';
-
-    expect(HTMLAfter).toEqual(HTMLExpected);
-  });
-
-  test('Should remove when parent is not empty eg.', () => {
-    const HTMLBefore = '<div>This is the content of the outer div<a></a></div>';
-    const HTMLAfter = removeNoContentAnchor(HTMLBefore);
-    const HTMLExpected = '<div>This is the content of the outer div</div>';
-
-    expect(HTMLAfter).toEqual(HTMLExpected);
-  });
-
-  // Anchor tags with text content should not be removed
-  test('Should not remove <a>foo</a>', () => {
-    const htmlData = '<a>foo</a>';
-    const htmlDataAfter = removeNoContentAnchor(htmlData);
-
-    expect(htmlDataAfter).toEqual(htmlData);
-  });
-
-  // Anchor tags with text content and href attribute should not be removed
-  test('Should not remove <a href="http://localhost">localhost</a> or <a href="http://localhost">abc</a>', () => {
-    const firstHtmlData = '<a href="http://localhost">localhost</a>';
-    const firstHtmlDataAfter = removeNoContentAnchor(firstHtmlData);
-
-    const secondHtmlData = '<a href="http://localhost">abc</a>';
-    const secondHtmlDataAfter = removeNoContentAnchor(secondHtmlData);
-
-    expect(firstHtmlDataAfter).toEqual(firstHtmlData);
-    expect(secondHtmlDataAfter).toEqual(secondHtmlData);
-  });
-
-  // Should recognize empty anchors in a sequence of anchors
-  test('Should only keep third anchor in <a></a><a></a><a>should still exist</a>', () => {
-    const htmlData = '<a></a><a></a><a>should still exist</a>';
-    const htmlDataAfter = removeNoContentAnchor(htmlData);
-    const htmlDataExpected = '<a>should still exist</a>';
-    expect(htmlDataAfter).toEqual(htmlDataExpected);
-  });
-
-  describe('Should keep anchors contain media tags only', () => {
-    // Should not remove anchor if it contains a single media tag. Media tags include 'img', 'audio', 'video', 'picture', 'svg', 'object', 'map', 'iframe', 'embed'
-    test('Should not remove anchors containing single media tag eg: <a><img></a>', () => {
-      const withImg = '
<a><img></a>';
-      const withAudio = '<a><audio></audio></a>';
-      const withVideo = '<a><video></video></a>';
-      const withPicture = '<a><picture></picture></a>';
-      const withSvg = '<a><svg></svg></a>';
-      const withObject = '<a><object></object></a>';
-      const withMap = '<a><map></map></a>';
-      const withIframe = '<a><iframe></iframe></a>';
-      const withEmbed = '<a><embed></a>
'; - - const withImgAfter = removeNoContentAnchor(withImg); - const withAudioAfter = removeNoContentAnchor(withAudio); - const withVideoAfter = removeNoContentAnchor(withVideo); - const withPictureAfter = removeNoContentAnchor(withPicture); - const withSvgAfter = removeNoContentAnchor(withSvg); - const withObjectAfter = removeNoContentAnchor(withObject); - const withMapAfter = removeNoContentAnchor(withMap); - const withIframeAfter = removeNoContentAnchor(withIframe); - const withEmbedAfter = removeNoContentAnchor(withEmbed); - - expect(withImgAfter).toEqual(withImg); - expect(withAudioAfter).toEqual(withAudio); - expect(withVideoAfter).toEqual(withVideo); - expect(withPictureAfter).toEqual(withPicture); - expect(withSvgAfter).toEqual(withSvg); - expect(withObjectAfter).toEqual(withObject); - expect(withMapAfter).toEqual(withMap); - expect(withIframeAfter).toEqual(withIframe); - expect(withEmbedAfter).toEqual(withEmbed); - }); - - // Should not remove anchor if it contain multiple media tags - test('Should not remove anchors containing multiple media tags eg: ', () => { - const htmlData = '
'; - const htmlDataAfter = removeNoContentAnchor(htmlData); - - expect(htmlDataAfter).toEqual(htmlData); - }); - }); -}); diff --git a/test/remove-empty-paragraphs.test.js b/test/remove-empty-paragraphs.test.js deleted file mode 100644 index 86f31fedd0..0000000000 --- a/test/remove-empty-paragraphs.test.js +++ /dev/null @@ -1,42 +0,0 @@ -const toDom = require('../src/backend/utils/html/dom'); -const removeEmptyParagraphs = require('../src/backend/utils/html/remove-empty-paragraphs'); - -describe('Remove no content anchor tags', () => { - test('should remove

', () => { - const htmlData = toDom('

'); - removeEmptyParagraphs(htmlData); - - const expectedHtml = '
'; - expect(htmlData.window.document.body.innerHTML).toEqual(expectedHtml); - }); - - test('should remove

(spaces)', () => { - const htmlData = toDom('

'); - removeEmptyParagraphs(htmlData); - - const expectedHtml = '
'; - expect(htmlData.window.document.body.innerHTML).toEqual(expectedHtml); - }); - - test('should remove

(tabs)', () => { - const htmlData = toDom('

'); - removeEmptyParagraphs(htmlData); - - const expectedHtml = '
'; - expect(htmlData.window.document.body.innerHTML).toEqual(expectedHtml); - }); - - test('should remove

⠀ ⠀

(braille)', () => { - const htmlData = toDom('

⠀ ⠀

'); - removeEmptyParagraphs(htmlData); - const expectedHtml = '
'; - expect(htmlData.window.document.body.innerHTML).toEqual(expectedHtml); - }); - - test('should remove


(line break)', () => { - const htmlData = toDom('


'); - removeEmptyParagraphs(htmlData); - const expectedHtml = '
'; - expect(htmlData.window.document.body.innerHTML).toEqual(expectedHtml); - }); -}); diff --git a/test/sanitize-html.test.js b/test/sanitize-html.test.js deleted file mode 100644 index e99327fb06..0000000000 --- a/test/sanitize-html.test.js +++ /dev/null @@ -1,160 +0,0 @@ -const sanitizeHTML = require('../src/backend/utils/html/sanitize'); - -const { WEB_URL } = process.env; - -describe('Sanitize HTML', () => { - test('

'); - expect(data).toBe('

Hello

'); - }); - - test('https://github.com/Seneca-CDOT/telescope/issues/1488', () => { - // Regex is rendered as plain text - const data = sanitizeHTML('(
(s?)+?){2,}
'); - expect(data).toBe('(
(s?)+?){2,}
'); - - // Script in a code block is removed - const xss = sanitizeHTML( - `harmless text
regex example: (
.*
)
` - ); - expect(xss).toBe(`harmless text
regex example: (
.*
)
`); - }); - - test(' should work, but inline js should not', () => { - const data = sanitizeHTML(''); - expect(data).toBe(''); - }); - - // note: any sort of image type is accepted using a data URI (.png, .gif, .jpg, etc.) - test(' URI links (gif based) should be accepted (i.e. not sanitized)', () => { - const data = sanitizeHTML( - '' - ); - expect(data).toBe( - '' - ); - }); - - test('img links over https should be accepted (i.e. not sanitized)', () => { - const data = sanitizeHTML( - '

' - ); - expect(data).toBe('

'); - }); - - // this test might break everything in the future as Chrome moves towards blocking mixed content. see: https://web.dev/what-is-mixed-content/ - test('img links over http should be accepted (i.e. not sanitized)', () => { - const data = sanitizeHTML( - '

' - ); - expect(data).toBe('

'); - }); - - test('protocoless urls should be accepted (i.e. not sanitized)', () => { - const data = sanitizeHTML(''); - expect(data).toBe(''); - }); - - test(' should work, but inline js should not', () => { - const data = sanitizeHTML( - 'W3Schools' - ); - expect(data).toBe(''); - }); - - test('

with inline style, sanitize strips inline style', () => { - const data = sanitizeHTML('

Here is color blue

'); - expect(data).toBe('

Here is color blue

'); - }); - - test('

with should work with inline style stripped', () => { - const data = sanitizeHTML( - '

Here is color blue

' - ); - expect(data).toBe('

Here is color blue

'); - }); - - test('
with should strip inline style as both are added to allowed tags', () => { - const data = sanitizeHTML( - '
' - ); - expect(data).toBe( - '
' - ); - }); - - test('' - ); - expect(data).toBe(''); - }); - - test(''); - expect(data).toBe(''); - }); - - test(''); - expect(data).toBe(''); - }); - - test(''); - expect(data).toBe(''); - }); - - test('` - ); - expect(data).toBe( - `` - ); - }); - - test('cdn.embedly.com embedded content should not get removed', () => { - const data = sanitizeHTML( - '' - ); - expect(data).toBe( - '' - ); - }); - - test('medium.com embedded content including Gist code should not get removed', () => { - const data = sanitizeHTML( - '' - ); - expect(data).toBe( - '' - ); - }); - - test('
 with inline style, sanitize strips inline style', () => {
-    const data = sanitizeHTML('
Hello World
'); - expect(data).toBe('
Hello World
'); - }); - - test(' with multiple tags and links should work', () => { - const data = sanitizeHTML( - '
The Final Product
' - ); - expect(data).toBe( - '
The Final Product
' - ); - }); - - test('twitch.tv embedded content should not be removed', () => { - // The parent domain also embeds the stream, so include the web url used here as well - const data = sanitizeHTML( - `` - ); - expect(data).toBe( - `` - ); - }); -}); diff --git a/test/server.test.js b/test/server.test.js deleted file mode 100644 index 4d35bdb652..0000000000 --- a/test/server.test.js +++ /dev/null @@ -1,22 +0,0 @@ -const request = require('supertest'); -const app = require('../src/backend/web/app'); - -jest.mock('../src/backend/utils/indexer'); - -describe('Health Check', () => { - it('should return a JSON object with property "status"', async () => { - const res = await request(app).get('/health'); - expect(res.statusCode).toEqual(200); - expect(res.body).toHaveProperty('status'); - expect(res.body.status).toEqual('ok'); - }); - - it('should return a JSON object with property "info"', async () => { - const res = await request(app).get('/health'); - expect(res.statusCode).toEqual(200); - expect(res.body).toHaveProperty('info'); - expect(res.body.info).toHaveProperty('gitHubUrl'); - expect(res.body.info).toHaveProperty('sha'); - expect(res.body.info).toHaveProperty('version'); - }); -}); diff --git a/test/storage.test.js b/test/storage.test.js deleted file mode 100644 index 8342a99d75..0000000000 --- a/test/storage.test.js +++ /dev/null @@ -1,190 +0,0 @@ -const { - addFeed, - getFeed, - getFeeds, - getFlaggedFeeds, - getFeedsCount, - removeFeed, - setFlaggedFeed, - unsetFlaggedFeed, - addPost, - getPost, - getPosts, - getPostsCount, - removePost, -} = require('../src/backend/utils/storage'); - -const Feed = require('../src/backend/data/feed'); -const hash = require('../src/backend/data/hash'); - -describe('Storage tests for feeds', () => { - const feed1 = new Feed('James Smith', 'http://seneca.co/jsmith', 'user'); - const feed2 = new Feed('James Smith 2', 'http://seneca.co/jsmith/2', 'user'); - const feed3 = new Feed('James Smith 2', 'http://seneca.co/jsmith/3', 'user', null, 'etag'); - const feed4 = new Feed( - 'James Smith 2', - 'http://seneca.co/jsmith/4', - 'user', - 'http://seneca.co/jsmith', - 'etag', - 'last-modified' - ); - - beforeAll(() => Promise.all([addFeed(feed1), addFeed(feed2), addFeed(feed3), addFeed(feed4)])); - - it('should allow retrieving a feed by id after inserting', async () => { - const feed = await getFeed(feed1.id); - expect(feed.id).toEqual(feed1.id); - expect(feed.author).toEqual(feed1.author); - expect(feed.url).toEqual(feed1.url); - expect(feed.etag).toEqual(''); - expect(feed.lastModified).toEqual(''); - expect(feed.user).toEqual(feed1.user); - }); - - it('should return expected feed count', async () => { - expect(await getFeedsCount()).toEqual(4); - }); - - it('should return expected feeds', async () => { - expect(await getFeeds()).toEqual([feed1.id, feed2.id, feed3.id, feed4.id]); - }); - - it('should deal with etag property correctly when available and missing', async () => { - const feeds = await Promise.all((await getFeeds()).map((id) => getFeed(id))); - expect(feeds[0].etag).toBe(''); - expect(feeds[1].etag).toBe(''); - expect(feeds[2].etag).toBe('etag'); - expect(feeds[3].etag).toBe('etag'); - }); - - it('should deal with lastModified property correctly when available and missing', async () => { - const feeds = await Promise.all((await getFeeds()).map((id) => getFeed(id))); - expect(feeds[0].lastModified).toBe(''); - expect(feeds[1].lastModified).toBe(''); - expect(feeds[2].lastModified).toBe(''); - expect(feeds[3].lastModified).toBe('last-modified'); - 
}); - - it('feed4 should have a link value', async () => { - const feeds = await Promise.all((await getFeeds()).map((id) => getFeed(id))); - expect(feeds[0].link).toBe(''); - expect(feeds[1].link).toBe(''); - expect(feeds[2].link).toBe(''); - expect(feeds[3].link).toBe('http://seneca.co/jsmith'); - }); - - it('feed4 should not exist after being removed', async () => { - const feed = await getFeed(feed4.id); - await removeFeed(feed.id); - // Removing an already removed Feed should not error - await removeFeed(feed.id); - const removedFeed = await getFeed(feed.id); - // This should return an empty Object {} (no id) - const feeds = await getFeeds(); - expect(removedFeed.id).toBe(undefined); - expect(feeds.includes(feed.id)).toBe(false); - }); - - it('feed3 should appear in flaggedFeed set after being flagged', async () => { - const feed = await getFeed(feed3.id); - await setFlaggedFeed(feed3.id); - const feeds = await getFeeds(); - const flaggedFeeds = await getFlaggedFeeds(); - expect(flaggedFeeds.includes(feed.id)).toBe(true); - expect(feeds.includes(feed.id)).toBe(false); - }); - - it('feed3 should not appear in flaggedFeed set after being unflagged', async () => { - const feed = await getFeed(feed3.id); - await unsetFlaggedFeed(feed3.id); - const feeds = await getFeeds(); - const flaggedFeeds = await getFlaggedFeeds(); - expect(feeds.includes(feed.id)).toBe(true); - expect(flaggedFeeds.includes(feed.id)).toBe(false); - }); -}); - -describe('Storage tests for posts', () => { - const testPost = { - guid: 'http://example.com', - id: hash('http://example.com'), - author: 'foo', - title: 'foo', - link: 'foo', - content: 'foo', - text: 'foo', - updated: new Date('2009-09-07T22:23:00.544Z'), - published: new Date('2009-09-07T22:20:00.000Z'), - url: 'foo', - site: 'foo', - }; - - const testPost2 = { - guid: 'http://dev.telescope.cdot.systems', - id: hash('http://dev.telescope.cdot.systems'), - author: 'foo', - title: 'foo', - link: 'foo', - content: 'foo', - text: 'foo', - updated: new Date('2009-09-07T22:23:00.544Z'), - published: new Date('2009-09-07T22:21:00.000Z'), - url: 'foo', - site: 'foo', - }; - - const testPost3 = { - guid: 'http://telescope.cdot.systems', - id: hash('http://telescope.cdot.systems'), - author: 'foo', - title: 'foo', - link: 'foo', - content: 'foo', - text: 'foo', - updated: new Date('2009-09-07T22:23:00.544Z'), - published: new Date('2009-09-07T22:22:00.000Z'), - url: 'foo', - site: 'foo', - }; - - beforeAll(() => Promise.all([testPost, testPost2, testPost3].map((post) => addPost(post)))); - - it('should allow retrieving a post by id after inserting', async () => { - const posts = await getPosts(0, 0); - const result = await getPost(posts[0]); - expect(result.id).toEqual(testPost3.id); - }); - - it('get all posts returns current number of posts', async () => { - const result = await getPosts(0, 0); - expect(result.length).toEqual(3); - }); - - it('get all posts returns sorted posts by date', async () => { - const result = await getPosts(0, 0); - const firstPost = await getPost(result[0]); - const secondPost = await getPost(result[1]); - expect(firstPost.id).toEqual(testPost3.id); - expect(secondPost.id).toEqual(testPost2.id); - }); - - it('check post count', async () => { - const count = await getPostsCount(); - expect(count).toEqual(3); - }); - - it('testPost and testPost2 should not appear in results after being removed', async () => { - const initPostCount = getPostsCount(); - await Promise.all([removePost(testPost.id), removePost(testPost2.id)]); - const postCount = 
getPostsCount(); - const posts = await getPosts(0, 0); - // Counts should not be the same after removing two posts - expect(postCount).not.toBe(initPostCount); - // id of testPost1 + testPost2 should not be in the array of postId returned by getPosts() - expect(posts.includes(testPost.id)).toBe(false); - expect(posts.includes(testPost2.id)).toBe(false); - // Checking to make sure testPost3 id is in there just to make sure - expect(posts.includes(testPost3.id)).toBe(true); - }); -}); diff --git a/test/syntax-highlight.test.js b/test/syntax-highlight.test.js deleted file mode 100644 index ee9d340777..0000000000 --- a/test/syntax-highlight.test.js +++ /dev/null @@ -1,69 +0,0 @@ -/** - * @jest-environment jsdom - */ - -const toDOM = require('../src/backend/utils/html/dom'); -const highlight = require('../src/backend/utils/html/syntax-highlight'); - -function syntaxHighlighter(html) { - const dom = toDOM(html); - highlight(dom); - return dom.window.document.body.innerHTML; -} - -/** - * syntaxHighlighter() will markup code so it can be styled as code with CSS - */ -describe('syntax-highlight tests', () => { - test('empty code blocks are left untouched', () => { - const original = ''; - const result = syntaxHighlighter(original); - expect(result).toEqual(original); - }); - - test('regular prose is left untouched', () => { - const original = 'This should stay identical.'; - const result = syntaxHighlighter(original); - expect(result).toEqual(original); - }); - - test('code outside
<pre>...</pre>
should be left untouched', () => { - const original = 'function fn() { console.log("This should stay identical."); }\n\n'; - const result = syntaxHighlighter(original); - expect(result).toEqual(original); - }); - - test('code inside
<pre>...</pre>
should get marked up', () => { - // C# code example, in regular text and as code - const original = 'const int i = 5;
const int i = 5;
'; - const result = syntaxHighlighter(original); - const expected = - 'const int i = 5;
const int i = 5;
'; - expect(result).toEqual(expected); - }); - - test('bash is correctly marked up', () => { - const original = '
cd foo
'; - const result = syntaxHighlighter(original); - const expected = - '
cd foo
'; - expect(result).toEqual(expected); - }); - - test('JavaScript is correctly marked up', () => { - const original = - '
import React, { Component } from "react"; function main() { console.log("hi"); }
'; - const result = syntaxHighlighter(original); - const expected = - '
import React, { Component } from "react"; function main() { console.log("hi"); }
';
-    expect(result).toEqual(expected);
-  });
-
-  test('2 consecutive escape characters (e.g. &&) with preset syntax-highlight should be converted correctly', () => {
-    const data = syntaxHighlighter(
-      `
npx husky-init && &< npm install --save-dev prettier pretty-quick
` - ); - const expectedData = `
npx husky-init && &< npm install --save-dev prettier pretty-quick
`; - expect(data).toBe(expectedData); - }); -}); diff --git a/test/test_files/blog.humphd.org.rss b/test/test_files/blog.humphd.org.rss deleted file mode 100644 index 899db4a93c..0000000000 --- a/test/test_files/blog.humphd.org.rss +++ /dev/null @@ -1,406 +0,0 @@ -<![CDATA[seneca - Bread & Circuits]]>https://blog.humphd.org/http://blog.humphd.org/favicon.pngseneca - Bread & Circuitshttps://blog.humphd.org/Ghost 2.15Mon, 20 Jan 2020 17:23:21 GMT60<![CDATA[Teaching Open Source, Fall 2019]]>Today I've completed another semester of teaching open source, and wanted to write something about what happened, experiments I tried, and what I learned.

This fall I taught the first of our two open source classes, cross-listed in our degree and diploma programs as OSD600 and DPS909.  This course focuses

]]>
https://blog.humphd.org/open-source-fall-2019/5df7bbd924511e03496b4734
Mon, 16 Dec 2019 20:37:14 GMT
Today I've completed another semester of teaching open source, and wanted to write something about what happened, experiments I tried, and what I learned.

This fall I taught the first of our two open source classes, cross-listed in our degree and diploma programs as OSD600 and DPS909.  This course focuses on getting students engaged in open source development practices, and has them participate in half-a-dozen different open source projects, making an average of 10 pull requests over 14 weeks.  The emphasis is on learning git, GitHub, and how to cope in large open source projects, code bases, and communities.

This is the 15th year I've taught it, and I had one of my largest groups: 60 students spread across two sections.  I don't think I could cope with more than this, especially when I'm also teaching other courses at the same time.

Running the Numbers

I ran an analysis this morning, and here's some data on the work the students did:

  • 665 Pull Requests (2,048 commits) to 266 GitHub Repositories
  • 425 have already been merged (63%)
  • +85,873/-25,510 lines changed in 2,049 files

They worked on all kinds of things, big and small.  I kept a list of projects and organizations I knew while I was marking this week, and some of what I saw included:

Whatever they worked on, I encouraged the students to progress as they went, which I define as building on previous experience, and trying to do a bit more with each PR.  "More" could mean working on a larger project, moving from a smaller "good first issue" type fix to something larger, or fixing multiple small bugs where previously they only did one.  I'm interested in seeing growth.

Personal Experiences

The students find their way into lots of projects I've never heard of, or wouldn't know to recommend.  By following their own passions and interests, fascinating things happen.

For example, one student fixed a bunch of things in knitcodemonkey/hexagon-quilt-map, a web app for creating quilt patterns.  Another got deeply involved in the community of a service mesh project called Layer5. A few women in one of my sections got excited about Microsoft’s recently open sourced C++ Standard Library.  If you'd asked me which projects students would work on, I wouldn't have guessed the STL; and yet it turned out to be a really great fit.  One of the students wrote about it in her blog:

Why do I always pick issues from Microsoft/STL?  It's because of the way they explain each bug. It is very well defined, there is a description of how to reproduce the bug and files that are crashing or need updates. Another reason is that there are only 20 contributors to this project and I'm 8th by the amount of contributing (even though it is only 31 line of code). The contributors are very quick to respond if I need any help or to review my PR.

Working on projects like those I've listed above isn't easy, but has its own set of rewards, not least that it adds useful experience to the students' resume.  As one student put it in his blog:

I have officially contributed to Facebook, Angular, Microsoft, Mozilla and many more projects (feels kinda nice to say it).  

Another wrote:

I contribute to various repositories all over the world and my work is being accepted and appreciated! My work and I are valued by the community of the Software Developers!

And another put it this way:

The most important thing — I am now a real Open-Source Developer!

Becoming a real open source developer means dealing with real problems, too.  One student put it well in his blog:

Programming is not an easy thing.

No, it isn't.  Despite the positive results, if you talked to the students during the labs, you would have heard them complaining about all sorts of problems doing this work, from wasting time finding things to work on, to problems with setting up their development environments, to difficulties understanding the code.  However, regardless of their complaints, most manage to get things done, and a lot do quite interesting work.

There's no doubt that having real deadlines, and a course grade to motivate them to find and finish open source work, helps a lot more of them get involved than would be the case if they were doing this on the side.  The students who don't take these courses could get involved in open source, but don't tend to--a lot more people are capable of this work than realize it.  They just need to put in the time.

The Devil's in the Details

I wish I could say that I've found a guaranteed strategy to get students to come to class or do their homework, but alas, I haven't.  Not all students do put in the time, and for them, this can be a really frustrating and defeating process, as they find out that you can't do this sort of work last minute. They might be able to stay up all night and cram for a test, or write a half-hearted paper; but you can't fix software bugs this way.  Everything that can go wrong will (especially for those using Windows), and these Yaks won't shave themselves.  You need hours of uninterrupted development, patience, and time to communicate with the community.

One of the themes that kept repeating in my head this term is that work like this is all about paying attention to small details.  I'm amazed when I meet students taking an advanced programming course who can't be bothered with the details of this work.  I don't mean being able to answer trivia questions about the latest tech craze.  Rather, I mean being attuned to the importance and interplay of software, versions, libraries, tools, operating systems, syntax, and the like.  Computers aren't forgiving.  They don't care how hard you try.  If you aren't interested in examining software at the cellular level, it's really hard to breathe life into source code.

Everything matters.  Students are amazed when they have to fix their commit messages ("too long", "wrong format", "reference this issue..."); squash and rebase commits (I warned them!), fix formatting (or vice versa when their editor got too ambitious autoformatting unrelated lines); change the names of variables; add comments; remove comments; add tests; fix tests; update version numbers; avoid updating version numbers! sign CLAs; add their name to AUTHORS files; don't add their name to other files!  pay attention to the failures on CI; ignore these other CI failures.

Thankfully, a lot of them do understand this, and as they travel further down the stacks of the software they use, and fix bugs deep inside massive programs, the value of small, delicate changes starts to make sense.  One of my students sent me a note to explain why one of her PRs was so small.  She had worked for weeks on a fix to a Google project, and in the end, all that investment of research, time, and debugging had resulted in a single line of code being changed.  Luckily I don't grade submissions by counting lines of code.  To me, this was a perfect fix.  I tried to reassure her by pointing out all the bugs she hadn't added by including more code than necessary.  Google agreed, and merged her fix.

Something New

This term I also decided to try something new.  We do a bunch of case studies, looking at successful open source projects (e.g., Redis, Prettier, VSCode) and I wanted to try and build an open source project together with the whole class using as much of the same tech and processes as possible.

I always get students involved in "external" projects (projects like those mentioned above, not run by me).  But by adding an "internal" project (one we run), a whole new kind of learning takes place.  Rather than showing up to an existing project, submitting a pull request and then moving on to the next, having our own project meant that students had to learn what it's like to be on the other side of a pull request, to become a maintainer vs. a contributor.

I've done smaller versions of this in the past, where I let students work in groups.  But to be honest it rarely works out the way I want.  Most students don't have enough experience designing and implementing software to be able to build something, and especially not something with lots of opportunity for people to contribute in parallel.

Our project was an RSS/Atom blog aggregator service and frontend called Telescope.  The 60 of us wrote it in a month, and I'm pleased to say that "it works!" (though it doesn't "Just Work" yet).  I recorded a visualization of the development process and put it on YouTube.

A day in the life of Telescope development

I've written a lot of open source software with students before, but never this many at once.  It was an interesting experience.  Here are some of my take-aways:

  • I tried to file Issues and review PRs vs. writing code.  We got to 500 Issues and PRs by the end, and I filed and reviewed hundreds of these.  Most students are still learning how to file bugs, and how to think about decomposing a feature into actionable steps.  As the weeks went on, they started to get the hang of it.  But in the end, I had to write the first bit of code to get people to join me.  I also had to be patient to let the code evolve in "interesting" ways, and have faith we'd get to where we need to be in the end (we did).

  • No matter what I did (lessons in class, writing docs, helping people in the lab), people managed to get their git repos in a mess.  Because so many of the external open source projects the students worked on require a rebase workflow, I encouraged the same in our project.  However, that didn't stop people from merging master into their branches over and over again, sometimes a dozen times in the same pull request.  I still haven't figured out how to teach this perfectly.  I love git, but it's still really hard for beginners to use properly.  In the end I often did rebases myself to help get students out of a mess.  But despite the problems, most people got the hang of it after a few weeks.  One of the students put it this way in his blog: "This summer I knew very little about git/GitHub…it was pure hell. Now we are jumping between branches and rebasing commits as gracefully and confident as squirrels."

  • Having a mix of macOS, Linux, and Windows meant we had to spend a lot of time on environment issues.  I think this is good, because it helps prepare students for the realities of modern programming.  However, I didn't love fighting with Windows on so, so many minor things.  Despite what I read online, I remain unconvinced that Windows is ready to be used as a primary development environment.

  • As we slowly added automation, tooling, and CI to the project, things got so much better.  One of the students added eslint with Airbnb's style guide.  This was interesting to watch, because it is very particular about spacing, and it made everyone have to adjust their work.  Later, we added Prettier and a .vscode directory with default extensions and project settings for line-endings, format-on-save, etc.  This fixed 99% of the issues we'd had previously, though we still needed a .gitattributes fix for Windows line endings to make git happy in Windows CI.

  • We used Jest for automated testing, and most students got to work on writing tests for the first time.  When I showed them how test coverage worked, a lot of them got really interested in increasing test coverage.  They also had to contend with lots of interesting issues, like rate-limits in live APIs vs. using mocks for network requests, dealing with test isolation and ordering issues, environment issues in CI that didn't manifest locally, and learning to write code defensively (see the first sketch after this list).

  • We used Docker and docker-compose to start our various services.  This was new for almost everyone, and quite a few students got really interested in how containers work.  Again, Windows made this an extremely frustrating experience for many students, who could never get it to run properly, and we had to work around that, too.

  • We tried to implement various pieces of the 12 Factor approach, a lot of which were new to them.  For example, using environment variables (a second sketch follows this list).  Over the weeks they learned how various things can go wrong, like having authentication tokens slip into git.  It's invaluable learning, and making mistakes is how you do it.

  • I started with package-lock.json in git, and eventually removed it and went to exact versioning in package.json.  The amount of churn it caused with this many people was unworkable, especially with people learning git and npm at the same time.

  • The pace of development was frenetic.  For some people, this was a plus, and it got them excited.  For others, it was a turn off, and helped to further isolate them when they couldn't get their environment to work, or when they had to continually rebase branches that couldn't get merged fast enough.  It was imperative that I be there to help keep things moving, jump in with tricky rebases and merges, and help to finish certain problematic fixes.  For this to work, you really need a few "senior" people who can help keep things moving.

  • Despite what I wrote above, students will not ask for help.  Not in person.  Not on Slack.  Not in issues.  It's not a tooling problem.  By putting them all together in one place, the natural tendency to not want to look stupid gets multiplied, and people become less collaborative, rather than more.  They want to show up with something finished vs. in process and broken.  I set up a Slack channel, and it was mostly empty.  However, a whole bunch of private chat rooms and Discords sprouted in parallel.  It's hard to know how to make this work perfectly.  I try to model "not knowing" and asking questions, but it takes a long time to become comfortable working this way.  I know I don't know everything, and that's OK. Students are still figuring out how they feel about this.

  • Students reviewing each other's (and my) code is really valuable.  I made all the students Collaborators, and set GitHub so that you needed 2 approvals to merge any code on master.  Sometimes I used my admin rights to merge something fast, but for the most part this worked really well.  One of the students commented in his blog how much he'd learned just by reviewing everyone's code.  We also talked a lot about how to write changes so they'd be more reviewable, and this got better over time, especially as we studied Google's Code Review process.  It's a real skill to know how to do a review, and unless you get to practice it, it's hard to make much progress.  We also got to see the fallout of bad reviews, which let test failures land, or which removed code that should not have been removed.
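
As a concrete illustration of the testing point above, here is a minimal Jest sketch of mocking a network call instead of hitting a live, rate-limited API.  The ./feed module and its getFeedText() function are hypothetical, invented for this example, and not Telescope's actual code:

    // feed.test.js -- a hedged sketch, not the real Telescope test suite.
    // Assume a hypothetical ./feed module exporting getFeedText(url),
    // which normally fetches a live URL (and can hit API rate limits).
    jest.mock('./feed');
    const { getFeedText } = require('./feed');

    test('parses a feed using a mock instead of the live API', async () => {
      // The auto-mocked function resolves to canned data we control
      getFeedText.mockResolvedValue('<rss version="2.0"></rss>');
      await expect(getFeedText('https://example.com/feed')).resolves.toContain('rss');
      expect(getFeedText).toHaveBeenCalledWith('https://example.com/feed');
    });

Mocking this way also helps with the isolation and ordering issues mentioned above, because no test depends on shared network state.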
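
For the environment-variable point, here is a minimal sketch of the 12 Factor config style; the variable names are examples of mine, not Telescope's actual configuration:

    // config.js -- hedged sketch: secrets and URLs come from the
    // environment, never from values committed to git.
    require('dotenv').config(); // loads a local .env file in development

    const redisUrl = process.env.REDIS_URL || 'redis://localhost:6379';
    const githubToken = process.env.GITHUB_TOKEN; // never hard-code tokens

    if (!githubToken) {
      console.warn('GITHUB_TOKEN is not set; API requests may be rate-limited.');
    }

    module.exports = { redisUrl, githubToken };

Keeping the real .env file out of git, while committing an example file that documents the names, is what prevents the leaked-token mistakes described above.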

In the winter term I'm going to continue working on the project in the second course, and take them through the process of shipping it into production.  Moving from fixing a bug to fixing enough bugs in order to ship is yet another experience they need to have.  If you want to join us, you'd be welcome to do so.

All in all, it was a busy term, and I'm ready for a break.  I continue to be incredibly grateful to all the projects and maintainers who reviewed code and worked with my students this time.  Thank you to all of you.  I couldn't do any of this if you weren't so open and willing for us to show up in your projects.

]]>
<![CDATA[What is a blog post?]]>
I've been working on some RSS/Atom blog aggregation software with my open source students.  Recently we got everything working, and it let me do an analysis of the past 15 years of blogging by my students.

I wanted to answer the question, "What is a blog post?"  That is,

]]>
https://blog.humphd.org/what-is-a-blog-post/5df2aa4c24511e03496b46bc
Thu, 12 Dec 2019 21:29:46 GMT
I've been working on some RSS/Atom blog aggregation software with my open source students.  Recently we got everything working, and it let me do an analysis of the past 15 years of blogging by my students.

I wanted to answer the question, "What is a blog post?"  That is, which HTML elements are used at all, and most often?  Which are never used?  My students have used every blogging platform you can think of over the years, from WordPress to Blogger to Medium, and many have rolled their own.  Therefore, while not perfect, this is a pretty good view into what blogging software uses.

Analyzing many thousands of posts, and hundreds of thousands of elements, here's what I found.  The top 5 elements account for 75% of all elements used.  A blog post is mostly:

  1. <br> (35%)
  2. <p> (18%)
  3. <div> (15%)
  4. <a> (10%)
  5. <li> (8%)

I'm really surprised at <br> being on top.  The next 18% is made up of the following:

  1. <td> (3%)
  2. <strong> (3%)
  3. <img> (3%)
  4. <pre> (2%)
  5. <code> (2%)
  6. <b> (1.5%)
  7. <em> (1.3%)
  8. <ul> (1.1%)
  9. <tr> (1%)

And the remainder are all used infrequently (< 1%):

  1. <h3>
  2. <figure>
  3. <i>
  4. <h4>
  5. <blockquote>
  6. <ol>
  7. <hr>
  8. <table>
  9. <tbody>
  10. <th>
  11. <h5>
  12. <iframe>
  13. <strike>
  14. <h6>
  15. <thead>
  16. <caption>

It's interesting to see the order of the heading levels match their frequency.  I'm also interested in what isn't here.  In all these posts, there's no <span>, ever.
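
The post doesn't include the analysis code, but here is a rough sketch of how such a tally could be computed with jsdom; this is my reconstruction, not the author's actual script:

    const { JSDOM } = require('jsdom');

    // Count how often each element type appears in a chunk of post HTML
    function tagFrequencies(html) {
      const { document } = new JSDOM(html).window;
      const counts = {};
      for (const el of document.body.querySelectorAll('*')) {
        const tag = el.tagName.toLowerCase();
        counts[tag] = (counts[tag] || 0) + 1;
      }
      return counts;
    }

    // Usage: feed it the concatenated HTML of many posts, then sort by count
    const counts = tagFrequencies('<p>hi<br></p><p><a href="#">a link</a></p>');
    console.log(Object.entries(counts).sort((a, b) => b[1] - a[1]));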

]]>
<![CDATA[Hacktoberfest 2019]]>
I've been marking student submissions in my open source course this weekend, and with only a half-dozen more to do, the procrastinator in me decided a blog post was in order.

Once again I've asked my students to participate in Hacktoberfest.  I wrote about the experience last year, and wanted

]]>
https://blog.humphd.org/hacktoberfest-2019/5dc89fce24511e03496b45b3
Mon, 11 Nov 2019 00:17:53 GMT
I've been marking student submissions in my open source course this weekend, and with only a half-dozen more to do, the procrastinator in me decided a blog post was in order.

Once again I've asked my students to participate in Hacktoberfest.  I wrote about the experience last year, and wanted to give an update on how it went this time.

I layer a few extra requirements on the students, some of them to deal with things I've learned in the past.  For one, I ask them to set some personal goals for the month, and look at each pull request as a chance to progress toward achieving these goals.  The students are quite different from one another, which I want to celebrate, and this lets them go in different directions, and move at different paces.

Here are some examples of the goals I heard this time around:

  • Finish all the required PRs
  • Increase confidence in myself as a developer
  • Master git/GitHub
  • Learn new languages and technologies (Rust, Python, React, etc)
  • Contribute to projects we use and enjoy on a daily basis (e.g., VSCode)
  • Contribute to some bigger projects (e.g., Mozilla)
  • Add more experience to our resume
  • Read other people's code, and get better at understanding new code
  • Work on projects used around the world
  • Work on projects used locally
  • Learn more about how big projects do testing

So how did it go?  First, the numbers:

  • 62 students completed all 4 PRs during the month (95% completion rate)
  • 246 Pull Requests were made, consisting of 647 commits to 881 files
  • 32K lines of code were added or modified

I'm always interested in the languages they choose.  I let them work on any open source projects, so given this freedom, how will they use it?  The most popular languages by pull request were:

  • JavaScript/TypeScript - 50%
  • HTML/CSS - 11%
  • C/C++/C# - 11%
  • Python - 10%
  • Java - 5%

Web technology projects dominate GitHub, and it's interesting to see that this is not entirely out of sync with GitHub's own stats on language positions.  As always, the long-tail provides interesting info as well.  A lot of people worked on bugs in languages they didn't know previously, including:

Swift, PHP, Go, Rust, OCaml, PowerShell, Ruby, Elixir, Kotlin

Because I ask the students to "progress" with the complexity and involvement of their pull requests, I had fewer people working in "Hacktoberfest" style repos (projects that pop up for October, and quickly vanish).  Instead, many students found their way into larger and well-known repositories and organizations, including:

Polymer, Bitcoin, Angular, Ethereum, VSCode, Microsoft Calculator, React Native for Windows, Microsoft STL, Jest, WordPress, node.js, Nasa, Mozilla, Home Assistant, Google, Instacart

The top GitHub organization by pull request volume was Microsoft.  Students worked on many Microsoft projects, which is interesting, since they didn't coordinate their efforts.  It turns out that Microsoft has a lot of open source these days.

When we were done, I asked the students to reflect on the process a bit, and answer a few questions.  Here's what I heard.

1. What are you proud of?  What did you accomplish during October?

  • Contributing to big projects (e.g., Microsoft STL, Nasa, Rust)
  • Contributing to small projects, who really needed my help
  • Learning a new language (e.g., Python)
  • Having PRs merged into projects we respect
  • Translation work -- using my personal skills to help a project
  • Seeing our work get shipped in a product we use
  • Learning new tech (e.g., complex dev environments, creating browser extensions)
  • Successfully contributing to a huge code base
  • Getting involved in open source communities
  • Overcoming the intimidation of getting involved

2. What surprised you about Open Source?  How was it different than you expected?

  • People in the community were much nicer than I expected
  • I expected more documentation, it was lacking
  • The range of projects: big companies, but also individuals and small communities
  • People spent time commenting on, reviewing, and helping with our PRs
  • People responded faster than we anticipated
  • At the same time, we also found that some projects never bothered to respond
  • Surprised to learn that everything I use has some amount of open source in it
  • Surprised at how many cool projects there are, so many that I don’t know about
  • Even on small issues, lead contributors will get involved in helping (e.g., 7 reviews in a node.js fix)
  • Surprised at how unhelpful the “Hacktoberfest” label is in general
  • “Good First Issue” doesn’t mean it will be easy.  People have different standards for what this means
  • Lots of things on GitHub are inactive, be careful you don’t waste your time
  • Projects have very different standards from one to the next, in terms of process, how professional they are, etc.
  • Surprised to see some of the hacks even really big projects use
  • Surprised how willing people were to let us get involved in their projects
  • Lots of camaraderie between devs in the community

3. What advice would you give yourself for next time?

  • Start small, progress from there
  • Manage your time well, it takes way longer than you think
  • Learn how to use GitHub’s Advanced Search well (an example query follows this list)
  • Make use of your peers, ask for help
  • Less time looking for a perfect issue, more time fixing a good-enough issue
  • Don’t rely on the Hacktoberfest label alone.
  • Don’t be afraid to fail.  Even if a PR doesn’t work, you’ll learn a lot in the process
  • Pick issues in projects you are interested in, since it takes so much time
  • Don’t be afraid to work on things you don’t (yet) know.  You can learn a lot more than you think.
  • Read the contributing docs, and save yourself time and mistakes
  • Run and test code locally before you push
  • Don’t be too picky with what you work on, just get involved
  • Look at previously closed PRs in a project for ideas on how to solve your own.
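
To make the Advanced Search advice concrete, a query along these lines is the kind of thing I have in mind (these are GitHub's documented search qualifiers; the language and label are just examples):

    is:issue is:open label:"good first issue" language:javascript archived:false sort:updated-desc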

One thing that was new for me this time around was seeing students get involved in repos and projects that didn't use English as their primary language.  I've had lots of students do localization in projects before.  But this time, I saw quite a few students working in languages other than English in issues and pull requests.  This is something I've been expecting to see for a while, especially with GitHub's Trending page so often featuring projects not in English.  But it was the first time it happened organically with my own students.

Once again, I'm grateful to the Hacktoberfest organizers, and to the hundreds of maintainers we encountered as we made our way across GitHub during October.  When you've been doing open source a long time, and work in git/GitHub everyday, it can be hard to remember what it's like to begin.  Because I continually return to the place where people start, I know first-hand how valuable it is to be given the chance to get involved, for people to acknowledge and accept your work, and for people to see that it's possible to contribute.

]]>
<![CDATA[Some Assembly Required]]>
In my open source courses, I spend a lot of time working with new developers who are trying to make sense of issues on GitHub and figure out how to begin.  When it comes to how people write their issues, I see all kinds of styles.  Some people write for

]]>
https://blog.humphd.org/some-assembly-required/5d700d6a24511e03496b4378
Wed, 04 Sep 2019 20:30:26 GMT
In my open source courses, I spend a lot of time working with new developers who are trying to make sense of issues on GitHub and figure out how to begin.  When it comes to how people write their issues, I see all kinds of styles.  Some people write for themselves, using issues like a TODO list: "I need to fix X and Y."  Other people log notes from a call or meeting, relying on the collective memory of those who attended: "We agreed that so-and-so is going to do such-and-such."  Still others write issues that come from outside the project, recording a bug or some other problem: "Here is what is happening to me..."

Because I'm getting ready to take another cohort of students into the wilds of GitHub, I've been thinking once more about ways to make this process better.  Recently I spent a number of days assembling furniture from IKEA with my wife.  Spending that much time with Allen keys got me thinking about what we could learn from IKEA's work to enable contribution from customers.

I am not a furniture maker.  Not even close.  While I own some power tools, most were gifts from my father, who actually knows how to wield them.  I'm fearless when it comes to altering bits in a computer; but anything that puts holes in wood, metal, or concrete terrifies me.  And yet, like so many other people around the world, I've "built" all kinds of furniture in our house--at least I've assembled it.

In case you haven't bought furniture from IKEA, they are famous for designing not only the furniture itself, but also the materials and packaging, and for saving cost by offloading most of the assembly to the customer.  Each piece comes with instructions, showing the parts manifest, tools you'll need (often simple ones are included), and pictorial, step-wise instructions for assembling the piece.

IKEA's model is amazing: amazing that people will do it, amazing that it's doable at all by the general public!  You're asking people to do a task that they a) probably have never done before; b) probably won't do again.  Sometimes you'll buy 4 of some piece, a chair, and through repeated trial and error, get to the point where you can assemble it intuitively.  But this isn't the normal use case.  For the most part, we buy something we don't have, assemble it, and then we have it.  This means that the process has to work during the initial attempt, without training.  IKEA is keen that it work because they don't want you to return it, or worse, never come back again.

Last week I assembled all kinds of things for a few rooms in our basement: chairs, a couch, tables, etc.  I spent hours looking at, and working my way through IKEA instructions.  Take another look at the Billy instructions I included above.  Here's some of what I notice:

  • It starts with the end-goal: here is how things should look when you're done
  • It tells you what tools you'll need in order to make this happen and, importantly, imposes strict limits on the sorts of tools that might be required.  An expert could probably make use of more advanced tools; but this isn't for experts.
  • It gives you a few GOTCHAs to avoid up front.  "Be careful to do it this way, not that way." This repeats throughout the rest of the steps.  Like this, not that.
  • It itemizes and names (via part number) all the various pieces you'll need.  There should be 16 of these, 18 of these, etc.
  • It takes you step-by-step through manipulating the parts on the floor into the product you saw in the store, all without words.
  • Now look at how short this thing is.  The information density is high even though the complexity is low.

It got me thinking about lessons we could learn when filing issues in open source projects.  I realize that there isn't a perfect analogy between assembling furniture and fixing a bug.  IKEA mass produces the same bookshelf, chairs, and tables, and these instructions work on all of them.  Meanwhile, a bug (hopefully) vanishes as soon as it's fixed.  We can't put the same effort into our instructions for a one-off experience as we can for a mass produced one.  However, in both cases, I would stress that the experience is similar for the person working through the "assembly": it's often their first time following these steps.

When filing a GitHub issue, what could we learn from IKEA instructions?

  1. Show the end goal of the work.  "This issue is about moving this button to the right.  Currently it looks like this and we want it to look like this."  A lot of people do this, especially with visual, UI related bugs.  However, we could do a version of it on lots of non-visual bugs too.  Here is what you're trying to achieve with this work.  When we file bugs, we assume this is always clear.  But imagine it needs to be clear based solely on these "instructions."
  2. List the tools you'll need to accomplish this, and include any that are not common.  We do this sometimes. "Go read the CONTRIBUTING.md page."  That might be enough.  But we could put more effort into calling out specific things you'll need that might not be obvious, URLs to things, command invocation examples, etc.  I think a lot of people bristle at the idea of using issues to teach people "common knowledge."  I agree that there's a limit to what is reasonable in an issue (recall how short IKEA's was).  But we often err on the side of not-enough, and assume that our knowledge is the same as our reader's.  It almost certainly won't be if this is for a new contributor.
  3. Call out the obstacles in the way of accomplishing this work.  Probably there are some things you should know about how the tests run when we change this part of the code.  Or maybe you need to be aware that we need to run some script after we update things in this directory.  Any mistakes that people have made in the past, and which haven't been dealt with through automation, are probably in scope here.  Even better, put them in a more sticky location like the official docs, and link to them from here.
  4. Include a manifest of the small parts involved.  For example, see the lines of code here, here, and here.  You'll have to update this file, this file, and that file.  This is the domain of the change you're going to need to make.  Be clear about what's involved.  I've done this a lot, and it often doesn't take much time when you know the code well.  However, for the new contributor, this is a lifesaver.
  5. Include a set of steps that one could follow on the way to making this fix.  This is especially important in the case that changes need to happen in a sequence.  A sample issue written along these lines follows this list.
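
As a hedged sketch of an issue written this way (every name, path, and command here is invented for illustration):

    Title: Move the Submit button to the right side of the form

    Goal: the form currently renders with the button on the left
    (screenshot A); when fixed it should match the mockup (screenshot B).

    Tools: Node.js and npm; see CONTRIBUTING.md for setup, and run the
    dev server with `npm start`.

    Gotchas: the stylesheet in dist/ is generated; edit
    src/styles/form.css, not the compiled output.

    Parts involved: src/components/Form.js, src/styles/form.css, and
    the snapshot test in test/form.test.js.

    Steps: 1) update the CSS, 2) update the snapshot test, 3) run
    `npm test` before opening the PR.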

These steps aren't always possible or practical.  But it takes less work than you might think, and the quality of the contributions you get as a result is worth the upfront investment.  In reality, you'll likely end up having to do it in reviews after the fact, when people get it wrong.  Try doing it at the beginning.

Here's a great example of how to do it well.  I've tweeted about this in the past, but Devon Abbott's issue in the Lona repo is fantastic: https://github.com/airbnb/Lona/issues/338.  Here we see many of the things outlined above.  As a result of this initial work, one of my students was able to jump in.

I want to be careful to not assume that everyone has time to do all this when filing bugs.  Not all projects are meant for external contributors (GitHub actually needs some kind of signal so that people know when to engage and when to avoid certain repos), and not all developers on GitHub are looking to mentor or work with new contributors.  Regardless, I think we could all improve our issues if we thought back to these IKEA instructions from time to time.  A lot of code fixes and regular maintenance tasks should really feel more like assembling furniture vs. hand carving a table leg.  There's so much to do to keep all this code working, we are going to have to find ways to engage and involve new generations of developers who need a hand getting started.

]]>
<![CDATA[irc.mozilla.org]]>
Today I read Mike Hoye's blog post about Mozilla's IRC server coming to an end.  He writes:

Mozilla has relied on IRC as our main synchronous communications tool since the beginning...While we still use it heavily, IRC is an ongoing source of abuse and  harassment for many of our
]]>
https://blog.humphd.org/irc-mozilla-org/5cc3a0f724511e03496b4057
Sat, 27 Apr 2019 02:51:04 GMT
Today I read Mike Hoye's blog post about Mozilla's IRC server coming to an end.  He writes:

Mozilla has relied on IRC as our main synchronous communications tool since the beginning...While we still use it heavily, IRC is an ongoing source of abuse and  harassment for many of our colleagues and getting connected to this now-obscure forum is an unnecessary technical barrier for anyone finding their way to Mozilla via the web.  

And, while "Mozilla intends to deprecate IRC," he goes on to say:

we definitely still need a globally-available, synchronous and text-first communication tool.

While I made dinner tonight, I thought back over my long history using Mozilla's IRC system, and tried to understand its place in my personal development within Mozilla and open source.

/invite

I remember the very first time I used IRC.  It was 2004, and earlier in the week I had met with Mike Shaver at Seneca, probably for the first time, and he'd ended our meeting with a phrase I'd never heard before, but I nodded knowingly nevertheless: "Ping me in #developers."

Ping me.  What on earth did that mean!? Little did I know that this phrase would come to signify so much about the next decade of my life.  After some research and initial trial and error, 'dave' joined irc.mozilla.org and found his way to the unlisted #developers channel.  And there was 'shaver', along with 300 or so other #developers.

The immediacy of it was unlike anything I'd used before (or since).  To join irc was to be transported somewhere else.  You weren't anywhere, or rather, you were simultaneously everywhere.  For many of these years I was connecting to irc from an old farm house in the middle of rural Ontario over a satellite internet connection.  But when I got online, there in the channels with me were people from New Zealand, the US, Sweden, and everywhere in between.

Possibly you've been on video calls with people from around the world, and felt something similar.  However, what was different from a video call, or teleconference, or any other medium I've used since, is that the time together didn't need to end.  You weren't meeting as such, and there wasn't a timebox or shared goal around your presence there.  Instead, you were working amongst one another, co-existing, listening, and most importantly for me, learning.

/join

Over the next year, irc went from being something I used here and there to something I used all the time.  I became 'humph' (one day Brendan confused me for Dave Herman, and shaver started calling me 'humph' to clarify) and have remained so ever since.  There are lots of people who have only ever called me 'humph' even to my face, which is hilarious and odd, but also very special.

Mike Beltzner taught me how to overcome one of the more difficult aspects of IRC: maintaining context after you log off.  Using screen and irssi I was able to start, leave, and then pick up conversations at a later time.  It's something you take for granted on Slack, but was critical to me being able to leverage IRC as a source of knowledge: if I asked a question, it might be hours before the person who could answer it would wake up and join irc from another part of the planet.

I became more engaged with different areas of the project.  IRC is siloed.  A given server is partitioned into many different channels, and each has its own sub-culture, appropriate topics, and community.  However, people typically participate in many channels.  As you get to know someone in one channel, you'll often hear more about the work happening in another.  Slowly I got invited into other channels and met more and more people across the Mozilla ecosystem.

Doing so took me places I hadn't anticipated.  For example, at some point I started chatting with people in #thunderbird, which led to me becoming an active contributor--I remember 'dascher' just started assigning me bugs to fix!  Another time I discovered the #static channel and a guy named 'taras' who was building crazy static analysis tools with gcc.  Without irc I can confidently say that I would have never started DXR, or worked on web audio, WebGL, all kinds of Firefox patches, or many of the other things I did.  I needed to be part of a community of peers and mentors for this work to be possible.

At a certain point I went from joining other channels to creating my own.  I started to build many communities within Mozilla to support new developers.  It was incredible to watch them fill up with a mix of experienced Mozilla contributors and people completely new to the project.  Over the years it helped to shape my approach to getting students involved in open source through direct participation.

/list

In some ways, IRC was short for "I Really Can do this."  On my own?  No.  No way. But with the support of a community that wasn't going to abandon me, who would answer my questions, spend long hours helping me debug things, or introduce me to people who might be able to unlock my progress, I was able to get all kinds of new things done.  People like shaver, ted, gavin, beltzner, vlad, jorendorff, reed, preed, bz, stuart, Standard8, Gijs, bsmedberg, rhelmer, dmose, myk, Sid, Pomax, and a hundred other friends and colleagues.

The kind of help you get on irc isn't perfect.  I can remember many times asking a question, and having bsmedberg give a reply, which would take me the rest of the day (or week!) to unpack and fully understand.  You got hints.  You got clues.  You were (sometimes) pointed in the right direction.  But no one was going to hold your hand the whole way.  You were at once surrounded by people who knew, and also completely on your own.  It still required a lot of personal research.  Everyone was also struggling with their own pieces of the puzzle, and it was key to know how much to ask, and how much to do on your own.

/query

Probably the most rewarding part of irc was the private messages.  Out of the blue, someone would ping you, sometimes in channel (or a new channel), but often just to you personally.  I developed many amazing friendships this way, some of them with people I've never met outside of a text window.

When I was working on the Firefox Audio Data API, I spent many weeks fighting with the DOM implementation.  There were quite a few people who knew this code, but their knowledge of it was too far beyond me, and I needed to work my way up to a place where we could discuss things.  I was very much on my own, and it was hard work.

One day I got a ping from someone calling themselves 'notmasteryet'.  I'd been blogging about my work, and linked to my patches, and 'notmasteryet' had started working on them.  You can't imagine the feeling of having someone on the internet randomly find you and say, "I think I figured out this tricky bit you've been struggling to make work."  That's exactly what happened, and we went on to spend many amazing weeks and months working on this together, sharing this quiet corner of Mozilla's irc server, moving at our own pace.

I hesitated to tell a story like this because there is no way to do justice to the many relationships I formed during the next decade.  I can't tell you all the amazing stories.  At one time or another, I got to work with just about everyone in Mozilla, and many became friends.  IRC allowed me to become a part of Mozilla in ways that would have been impossible just reading blogs, mailing lists, or bugzilla.  To build relationships, one needs long periods of time together.  It happens slowly.

/part

But then, at a certain point, I stopped completely.  It's maybe been four or five years since I last used irc.  There are lots of reasons for it.  Partly it was due to things mhoye discussed in his blog post (I can confirm that harassment is real on irc). But also Mozilla had changed, and many of my friends and colleagues had moved on.  IRC, and the Mozilla that populated it, is part of the past.

Around the same time I was leaving IRC, Slack was just starting to take off.  Since then, Slack has come to dominate the space once occupied by tools like irc.  As I write this, Slack is in the process of doing its IPO, with an impressive $400M in revenue last year.  Slack is popular.

When I gave up irc, I really didn't want to start in on another version of the same thing.  I've used Slack a lot out of necessity, and even in my open source classes as a way to expose my students to it, so they'll know how it works.  But I've never really found it compelling.  Slack is a better irc, there's no doubt.  But it's also not what I loved about irc.mozilla.org.

Mike writes that he's in the process of evaluating possible replacements for irc within Mozilla.  I think it's great that he and Mozilla are wrestling with this.  I wish more open source projects would do it, too.  Having a way to get deeply engaged with a community is important, especially one as large as Mozilla.

Whatever product or tool gets chosen, it needs to allow people to join without being invited.  Tools like Slack do a great job with authentication and managing identity.  But to achieve it they rely on gatekeeping.  I wasn't the typical person who used irc.mozilla.org when I started; but by using it for a long time, I made it a different place.  It's really important that any tool like this does more than just support the in-groups (e.g., employees, core contributors, etc).  It's also really important that any tool like this does better than create out-groups.

/quit

IRC was a critical part of my beginnings in open source.  I loved it.  I still miss many of the friends I used to talk to daily.  I miss having people ping me.  As I work with my open source students, I think a lot about what I'd do if I was starting today.  It's not possible to follow the same path I took.  The conclusion I've come to is that the only way to get started is to focus on connecting with people.  In the end, the tools don't matter, they change.  But the people matter a lot, and we should put all of our effort into building relationships with them.  

]]>
<![CDATA[Teaching Open Source: Sept 2018 - April 2019]]>
Today I submitted my grades and completed another year of teaching.  I've spent the past few weeks marking student projects non-stop, which has included reading a lot of pull requests in my various open source courses.

As a way to keep myself sane while I marked, I wrote some code

]]>
https://blog.humphd.org/teaching-open-source-sept-2018-april/5cbde3e624511e03496b3be2
Tue, 23 Apr 2019 02:31:19 GMT
Today I submitted my grades and completed another year of teaching.  I've spent the past few weeks marking student projects non-stop, which has included reading a lot of pull requests in my various open source courses.

As a way to keep myself sane while I marked, I wrote some code to do analysis of all the pull requests my students worked on during the fall and winter terms.  I've been teaching open source courses at Seneca since 2005, and I've always wanted to do this.  Now that I've got all of my students contributing to projects on GitHub, it's become much easier to collect this info via the amazing GitHub API.
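
The post doesn't show that analysis code, but a minimal sketch against GitHub's documented search endpoint (GET /search/issues) suggests its shape; the username, date range, and return fields here are placeholders of my own, not the actual script:

    const fetch = require('node-fetch');

    // Find one student's PRs for the two terms via the search API
    async function getPullRequests(username) {
      const q = `type:pr author:${username} created:2018-09-01..2019-04-30`;
      const res = await fetch(
        `https://api.github.com/search/issues?q=${encodeURIComponent(q)}&per_page=100`,
        { headers: { Authorization: `token ${process.env.GITHUB_TOKEN}` } }
      );
      const { total_count: total, items } = await res.json();
      return { total, titles: items.map((pr) => pr.title) };
    }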

I never teach these classes exactly the same way twice.  Some years I've had everyone work on different parts of a larger project, for example, implementing features in Firefox, or working on specific web tooling.  This year I took a different approach, and let each student be self-directed, giving them more freedom to choose whatever open source projects they wanted.  Having done so, I wanted to better understand the results, and what lessons I could draw for subsequent years.

GitHub Analysis

To begin, here are some of the numbers:

104 students participated in my open source courses from Sept 2018 to April 2019, some taking both the first and second courses I teach.

Together, these students made 1,014 Pull Requests to 308 Repositories.  The average number of PRs per student was 9.66 (mode=12, median=10, max=22).  Here's a breakdown of what happened with these PRs:

                         Total   Percent
  Merged                   606       60%
  Still Open               258       25%
  Closed (Unmerged)        128       13%
  Deleted (By Student)      22        2%

I was glad to see so many get merged, and so few get closed without full resolution.  There are lots of projects that are slow to respond to PRs, or never respond.  But the majority of the "Still Open" PRs are those that were completed in the last few weeks.

Next, I was really interested to see which languages the students would choose.  All of the students are in their final semesters of a programming diploma or degree, and have learned half-a-dozen programming languages by now.  What did they choose?  Here's the top of the list:

  Language      Total (PRs)   Percent
  JavaScript            508       51%
  Python                 85        9%
  C++                    79        8%
  Java                   68        7%
  TypeScript             39      3.9%
  C#                     34      3.4%
  Swift                  28      2.8%
  Other                 152     14.9%

In some ways, no surprises here. This list mirrors other similar lists I've seen around the web.  I suspect the sway toward JS also reflects my own influence in the courses, since I teach using a lot of case studies of JS projects.

The "Other" category is interesting to me.  Many students purposely chose to work in languages they didn't know in order to try something new (I love and encourage this, by the way).  Among these languages I saw PRs in all of:

Rust (16), Go (13), Kotlin (11), PHP (5), Ruby (3), Clojure (3), Lua (2), as well as Scala, F#, Dart, PowerShell, Assembly, GDScript, FreeMarker, and Vim script.

Over the weeks and months, my goal for the students is that they would progress, working on larger and more significant projects and pull requests.  I don't define progress or significant in absolute terms, since each student is at a different place when they arrive in the course, and progress can mean something quite different depending on your starting point.  That said, I was interested to see the kinds of "larger" and more "complex" projects the students chose.  Some examples of more recognizable projects and repos I saw:

They worked on editors (Notepad++, Neovim), blogging platforms (Ghost, WordPress), compilers (emscripten), blockchain, game engines, email apps, online books, linting tools, mapping tools, terminals, web front-end toolkits, and just about everything you can think of, including lots of things I wouldn't have thought of (or recommended!).

They also did lots and lots of very small contributions: typos, dead code removal, translation and localization, "good first issue," "help wanted," "hacktoberfest."  I saw everything.

Stories from Student Blogs

Along the way I had them write blog posts, and reflect on what they were learning, what was working and what wasn't, and how they felt.  Like all students, many do the bare minimum to meet this requirement; but some understand the power and reach of a good blog post.  I read some great ones this term.  Here are just a few stories of the many I enjoyed watching unfold.

I.

Julia pushed herself to work on a lot of different projects, from VSCode to Mozilla's Voice-Web to nodejs.  Of her contribution to node, she writes:

I think one of my proudest contributions to date was for Node.js. This is something I never would have imagined contributing to even just a  year ago.

We talk a lot, and openly, about imposter syndrome.  Open source gives students a chance to prove to themselves, and the world, that they are indeed capable of working at a high level.  Open source is hard, and when you're able to do it, and your work gets merged, it's very affirming on a personal level.  I love to see students realize they do in fact have something to contribute, that maybe they do belong.

Having gained this confidence working on node, Julia went on to really find her stride working within Microsoft's Fast-DNA project, fixing half-a-dozen issues during the winter term:

I’ve gotten to work with a team that seems dedicated to a strong  development process and code quality, which in turn helps me build good  habits when writing code.

Open source takes students out of the confines and limitations of a traditional academic project, and lets them work with professionals in industry, learning how they work, and how to build world-class software.

II.

Alexander was really keen to learn more about Python, ML, and data science.  In the fall he discovered the data analysis library Pandas, and slowly began learning how to contribute to the project.  At first he focused on bugs related to documentation and linting, which led to him learning how their extensive unit tests worked.  I think he was a bit surprised to discover just how much his experience with the unit tests would help him move forward to fixing bugs:

In the beginning, I had almost no idea what any of the functions did and I would get lost navigating through the directories when searching for something. Solving linting errors was a great start for me and was also challenging enough due to my lack of knowledge in open  source and the Pandas project specifically. Now I could identify where the issue originates from easily and also  write tests to ensure that the requested functionality works as expected. Solving the actual issue is still challenging because finding a solution to the actual problem requires the most time and research.  However, now I am able to solve real code problems in Pandas, which I  would not be able to do when I started. I'm proud of my progress...

Open source development tends to favour many small, incremental improvements vs. big changes, and this maps well to the best way for students to build confidence and learn: bit at a time, small steps on the road of discovery.

One of the many Pandas APIs that Alexander worked on was the dropna() function.  He fixed a number of bugs related to its implementation. Why bother fixing dropna?  With the recent black hole imaging announcement, I noticed that source code for the project was put on GitHub.  Within that code I thought it was interesting to discover Pandas and dropna() being used, and further, that it had been commented out due to a bug.  Was this fixed by one of Alexander's recent PRs?  Hard to say, but regardless, lots of future  scientists and researchers will benefit from his work to fix these bugs.  Software maintenance is rewarding work.

Over and over again during the year, I heard students discuss how surprised they were to find bugs in big software projects.  If you've been working on software for a long time, you know that all software has bugs.  But when you're new, it feels like only you make mistakes.

In the course I emphasize the importance of software maintenance, the value of fixing or removing existing code vs. always adding new features.  Alexander spent all his time maintaining Pandas and functions like dropna(), and I think it's an ideal way for students to get involved in the software stack.

III.

Volodymyr was interested to gain more experience developing for companies and projects he could put on his resume after he graduates.  Through the fall and winter he contributed to lots of big projects: Firefox, Firefox Focus, Brave for iOS, VSCode, and more.  Eventually he found his favourite, Airbnb's Lona project.

With repeated success and a trail of merged PRs, Volodymyr described being able to slowly overcome his feelings of self doubt: "I wasn’t sure if I was good enough to work on these bugs."

A real turning point for him came with a tweet from Dan Abramov, announcing a project to localize the React documentation:

I develop a lot using React, and I love this library a lot. I wanted to  contribute to the React community, and it was a great opportunity to do  it, so I applied for it. Shortly after it, the repository for Ukrainian  translation was created, and I was assigned to maintain it 🤠

Over the next three months, Volodymyr took on that task of maintaining a very active and high-profile localization project (96% complete as I write this), and in so doing learned all kinds of things about what it's like to be on the other side of a pull request, this time having to do reviews, difficult merges, learning how to keep community engaged, etc.  Seeing the work ship has been very rewarding.

Open source gives students a chance to show up, to take on responsibility, and become part of the larger community.  Having the opportunity to move from being a user to a contributor to a leader is unique and special.

My Own Personal Learning

Finally, I wanted to pause for a moment to consider some of the things I learned with this iteration of the courses.  In no particular order, here are some of the thoughts I've been having over the last week:

Mentoring 100+ students across 300+ projects and 28 programming languages is daunting for me.  Students come to me every day, all day, and ask for help with broken dev environments, problems with reviewers, issues with git, etc.  I miss having a single project where everyone works together, because it allows me to be more focused and helpful.  At the same time, the diversity of what the students did speaks to the value of embracing the chaos of open source in all its many incarnations.

Related to my previous point, I've felt a real lack of stable community around a lot of the projects the students worked in.  I don't mean there aren't people working on them.  Rather, it's not always easy to find ways to plug them in.  Mailing lists are no longer hot, and irc has mostly disappeared.  GitHub Issues usually aren't the right place to discuss things that aren't laser focused on a given bug, but students need places to go and talk about tools, underlying concepts in the code, and the like.  "What about Slack?"  Some projects have it, some don't.  Those that do don't always give invitations easily.  It's a bit of a mess, and I think it's a thing that's really missing.

Open source work on a Windows machine is still unnecessarily hard.  Most of my students use Windows machines.  I think this is partly due to cost, but also many of them simply like it as an operating system.  However, trying to get them involved in open source projects on a Windows machine is usually painful.  I can't believe how much time we waste getting basic things setup, installed, and building.  Please support Windows developers in your open source projects.

When we start the course, I often ask the students which languages they like, want to learn, and feel most comfortable using.  Over and over again I'm told "C/C++".  However, looking at the stats above, C-like languages only accounted for ~15% of all pull requests.  There's a disconnect between what students tell me they want to do, and what they eventually do.  I don't fully understand this, but my suspicion is that real-world C/C++ code is much more complicated than their previous academic work.

Every project thinks they know how to do open source the right way, and yet, they all do it differently.  It's somewhat hilarious for me to watch, from my perch atop 300+ repos.  If you only contribute to a handful of projects within a small ecosystem, you can start to assume that how "we" work is how "everyone" works.  It's not.  The processes for claiming a bug, making a PR, managing commits, etc. is different in just about every project.  Lots of them expect exactly the opposite behaviour!  It's confusing for students.  It's confusing for me.  It's confusing for everyone.

It's still too hard to match new developers with bugs in open source projects.  One of my students told me, "It was easier to find a husband than a good open source project to work in!"  There are hundreds of thousands of issues on GitHub that need a developer.  You'd think that 100+ students should have no problem finding good work to do.  And yet, I still find it's overly difficult.  It's a hard problem to solve on all sides: I've been in every position, and none of them are easy.  I think students waste a lot of time looking for the "right" project and "perfect" bug, and could likely get going on lots of things that don't initially look "perfect."  Until you have experience and confidence to dive into the unknown, you tend to want to work on things you feel you can do easily.  I need to continue to help students build more of this confidence earlier.  It happens, but it's not quick.

Students don't understand the difference between apps and the technologies out of which they are made.  Tools, libraries, frameworks, test automation--there is a world of opportunity for contribution just below the surface of visible computing.  Because these areas are unknown and mysterious to students, they don't tend to gravitate to them.  I need to find ways to change this.  Whenever I hear "I want to work on Android apps..." I despair a little.

Teaching open source in 2019 has really been a proxy for teaching git and GitHub.  While I did have some students work outside GitHub, it was really rare.  As such, students need a deep understanding of git and its various workflows, so this is what I've focused on in large part.  Within days of joining a project, students are expected to be able to branch, deal with remotes, rebase, squash commits, fix commit messages, and all sorts of other intermediate to advanced things with git.  I have to move fast to get them ready in time.
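
To give a sense of what that means in practice, a typical week involves sequences like the following (a rough sketch only; the remote and branch names are made up):

# keep a feature branch current with the upstream project
git remote add upstream https://github.com/some-org/some-project.git
git fetch upstream
git rebase upstream/master

# squash the last three commits into one, then fix up the message
git rebase -i HEAD~3
git commit --amend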

Despite all the horrible examples you'll see on Twitter, the open source community has, in large part, been really welcoming and kind to the majority of my students.  I'm continually amazed how much time maintainers will take with reviews, answering questions, and helping new people get started.  It's not uncommon for one of my students to start working on a project, and all of a sudden be talking to its creator, who is patiently walking them through some setup problem. Open source isn't always a loving place (I could tell you some awful stories, too).  But the good outweighs the bad, and I'm still happy to take students there.

Conclusion

I'm ready for a break, but I've also had a lot of fun, and been inspired by many of my best students.  I'm hoping I'll be able to teach these courses again in the fall.  Until then, I'll continue to reflect on what worked and what didn't, and try to improve things next time.

In the meantime, I'll mention that I could use your support.  Doing this work is hard, and requires a lot of my time.  In the past I've had companies like Mozilla generously help me stay on track.  If you or your company would like to find ways to partner or support this work, please get in touch.  Also, if you're hiring new developers or interns, please consider hiring some of these amazing students I've been teaching.  I know they would be grateful to talk to you as well.

Thanks for your continued interest in what we're doing.  I see lots of you out there in the wild, doing reviews, commenting on pull requests, giving students a favourite on Twitter, leaving a comment in their blog.  Open source works because people take the time to help one another.

]]>
<![CDATA[The technology of nostalgia]]>Today one of my first year web students emailed me a question:

Today when I was searching some questions on StackOverflow, I found their website turns to a really interesting display and can be changed to the regular one back and forth by a button on the top. I guess
]]>
https://blog.humphd.org/the-technology-of-nostalgia/5ca23c3624511e03496b3aefMon, 01 Apr 2019 17:04:01 GMTToday one of my first year web students emailed me a question:

Today when I was searching some questions on StackOverflow, I found their website turns to a really interesting display and can be changed to the regular one back and forth by a button on the top. I guess it's some April Fool's day joke...how did they make the mouse pointer thing? There are stars dropping and tracking the pointer when you move it, I was thinking of inspecting it to get a sense, but then I realized I usually use the pointer to click on an element for inspecting, but I can't click the mouse itself, so I'm lost...Here's a website to explain what I'm saying: https://stackoverflow.com/questions/1098040/checking-if-a-key-exists-in-a-javascript-object?rq=1 Could you tell me how does that work, is this till done by CSS? or JS?

I went to look, and here is an animation of what I found.  Notice the trail of stars behind my mouse pointer as it moves:

My first thought was that they must have a mousemove handler on the body.  I opened the dev tools and looked through the registered event listeners for mousemove.  Sure enough, there was one registered on the document, and the code looked like this:

function onMouseMove(e) {
    cursor.x = e.clientX;
    cursor.y = e.clientY;

    addParticle(
        cursor.x,
        cursor.y,
        possibleColors[Math.floor(Math.random()*possibleColors.length)]
    );
}

Reading a bit further into the file revealed this comment:

/*!
 * Fairy Dust Cursor.js
 * - 90's cursors collection
 * -- https://github.com/tholman/90s-cursor-effects
 * -- https://codepen.io/tholman/full/jWmZxZ/
 */

This is using tholman's cursor-effects JS library, and specifically the fairyDustCursor.

This code is really fun to read, especially for my early web students.  It's short, readable, not unnecessarily clever, and uses really common things in interesting ways.  Almost everything it does, my students have seen before--they just might not have thought to put it all together into one package like this.

Essentially how it works is that Particle objects are stored in an array, and each one gets added to the DOM as a <span>*</span> with a different colour, and CSS is used to move (translate) each away from an origin (the mouse pointer's x and y position).  Over time (iterations of the requestAnimationFrame loop), each of these particles ages, and eventually dies, getting removed from the array and DOM.
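
Here is a condensed sketch of that pattern (my own simplification for teaching, not the library's actual code):

const particles = [];

function addParticle(x, y, color) {
  const span = document.createElement('span');
  span.textContent = '*';
  span.style.color = color;
  span.style.position = 'fixed';
  span.style.top = '0';
  span.style.left = '0';
  span.style.pointerEvents = 'none'; // stars shouldn't block clicks
  document.body.appendChild(span);
  particles.push({ span, x, y, life: 60 }); // roughly one second at 60fps
}

function loop() {
  for (let i = particles.length - 1; i >= 0; i--) {
    const p = particles[i];
    p.y += 1;    // drift down, away from the pointer
    p.life -= 1; // age the particle
    p.span.style.transform = `translate3d(${p.x}px, ${p.y}px, 0)`;
    if (p.life <= 0) {
      p.span.remove();        // dead: remove from the DOM...
      particles.splice(i, 1); // ...and from the array
    }
  }
  requestAnimationFrame(loop);
}
requestAnimationFrame(loop);

Pair that with the onMouseMove handler above, and you have the entire effect.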

As I read the code, something else struck me.  Both Stack Overflow and the cursor-effects library talk about this style of web site being from the 90s.  It's true, we didn't have the kind of refined and "delightful experiences" we take for granted today.  It was a lot of flashing, banner ads, high contrast colours, and people inventing (often badly) as they went.

Yet reading the code for how this effect was done, I couldn't help but pause to reflect on how modern it is at the same time.  Consider some of the browser APIs necessary to make this "90s" effect possible, and when they were first shipped (all dates are from caniuse.com):

  1. querySelector c. 2008 WebKit
  2. translate3d c. 2009 WebKit
  3. touchstart event c. 2009 Google
  4. requestAnimationFrame c. 2010 Mozilla
  5. pointer-events, touch-action c. 2012 Microsoft
  6. will-change c. 2014 Google

The progress that's been made on the web in the past 10 years is incredible.  In 2019 it only takes a few lines of code to do the kind of creative things we struggled with in 1999.  The web platform has evolved to something really great, and I love being part of it.

]]>
<![CDATA[Browsing open source projects]]>This week I've asked my open source students to research three new (to them) projects.  In the spirit of walking-the-talk, I wanted to share three that I found recently.

I'm constantly looking at new projects and repos.  I love GitHub's Trending feed, and discovering new things via Twitter, podcasts, and

]]>
https://blog.humphd.org/browsing-open-source-projects/5c59b0dd6b6baf39f0b1bc86Tue, 05 Feb 2019 17:12:03 GMTThis week I've asked my open source students to research three new (to them) projects.  In the spirit of walking-the-talk, I wanted to share three that I found recently.

I'm constantly looking at new projects and repos.  I love GitHub's Trending feed, and discovering new things via Twitter, podcasts, and blog posts.  Partly I'm just curious, and love to find inspiration in other people's work.  And partly I'm always shopping for new places that my students can get involved.  Helping hundreds of students find open source projects each year is a full-time job.

Without further ado, here are the projects I wanted to discuss:

  1. The Buttercup password manager
  2. The OpenRCT2 game
  3. The Svelte UI Framework

Buttercup

I'm a huge fan of password managers.  I myself use 1Password, and have purchased the Family Plan so everyone in our house can use it, safely share accounts, sensitive documents, sync between all our devices, etc.  However, not everyone needs all these features, nor does it make sense for everyone to take on the subscription cost.

Luckily there are a number of good free and/or open alternatives to 1Password.  One that I've recently learned about is Buttercup.  I first learned about it via this Changelog podcast episode.  In it, one of the two core maintainers of Buttercup, Perry Mitchell, discusses the history and future of the project.

The scope and quality of the project is really striking, with desktop clients for every OS, mobile apps, and browser extensions, all of which look beautiful.

Buttercup desktop (Electron) client

The project is built using React, Electron, React-Native, and also includes encryption code in Rust and WebAssembly.  Very often I have students tell me that they want to work on something that uses React vs. contributing to React itself, and in my experience, it can be hard to find projects like this.  Everyone uses it, but not everyone who uses it does so in an open source context.  Similarly, people want to work on mobile apps, or tell me they want to try something with Rust.  Having so much of this modern tech all in one project is great.

I also really like how the whole thing is split up into an ecosystem of separate apps, tools, and repos.  There are lots of ways someone could get involved without having to necessarily take on the whole thing.  Right now there are 232 open issues across all these repos--plenty of opportunity for a few students to dive in and make a contribution.

OpenRCT2

The second project I wanted to mention is one I recently saw via Twitter, an open source remake of the popular game RollerCoaster Tycoon 2.  I don't play many games, but many of my students do, and this is what caught my interest.  

OpenRCT2 is a fairly big project, with roughly half-a-million lines of C/C++ code, and a very active developer and user community.  There are great docs and lots of open issues (944), with some even labelled as good first issue.

Because our students study C/C++ for 3 terms before they get to the open source courses, a lot of them tell me that they want to work on something in C++.  However, when I show them possible projects, the code often overwhelms them because there is too much of a gap between their academic experience and real-world C++.  Also, the projects written in C++ can often be very low level, abstract, and difficult to use in ways that are easy for a beginner to understand, or only work on particular platforms.  With OpenRCT2, a lot of these problems are less of an issue, with the game running on every OS, and the "build" being relatively easy and fun to test.

Svelte

The last project I wanted to discuss is one I also heard about via a podcast.  Svelte is a framework-less UI framework.  I've heard about it in the past, but never used it, or dug into the project in any depth.  Listening to Rich Harris discuss the upcoming 3.0 release, I got really interested in the approach.  Where React and Vue update UI by applying runtime diffs between a virtual DOM and the real DOM, Svelte takes a different approach: using compile time analysis to generate pure JavaScript UI code that doesn't need a runtime framework.
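
For flavour, here is roughly what a minimal component looks like in the Svelte 3 syntax Rich describes (a sketch based on my reading of the docs, not production code):

<!-- Counter.svelte -->
<script>
  let count = 0; // reassigning this is what triggers a DOM update
</script>

<button on:click={() => count += 1}>
  Clicked {count} times
</button>

The interesting part is what you don't ship: the compiler turns this into plain JavaScript that updates the exact DOM nodes that changed, with no framework runtime diffing a virtual DOM.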

For work that I'm about to start on some UI components, I've been looking for a way to write code that isn't necessarily React, Vue, Angular, etc. but could be used in those frameworks, or in a vanilla DOM app.  Svelte seems like a good middle road for building something that can work in a lot of different contexts.

The community is a lot smaller than React or Vue, which you could view as a good or bad thing.  I think it's good from the standpoint of someone wanting to contribute, because there is a bit less pressure on the issues.  The community uses Discord for real-time chat, which is nice for contributors wanting to get a question answered.  There are lots of open issues across all the various repos in the ecosystem.  Like Buttercup, the project is split into many separate, smaller repos.

]]>
<![CDATA[Credit where credit's due]]>This past week one of my open source students was presenting on a bug she'd fixed in a big project.  Toward the end of her demo, she made an off-hand comment about how a maintainer had included her fix, but not the commit itself.  Her code found its way into

]]>
https://blog.humphd.org/credit-where-credits-due/5c4e5b3f6b6baf39f0b1bc60Mon, 28 Jan 2019 01:48:04 GMTThis past week one of my open source students was presenting on a bug she'd fixed in a big project.  Toward the end of her demo, she made an off-hand comment about how a maintainer had included her fix, but not the commit itself.  Her code found its way into the project, but she didn't.

This is problematic on a number of levels.  IANAL, so I won't spend a lot of time diving into the legal and copyright issues.  However, I did want to say something practical about the community side of this.

When new people contribute to a project, part of what they're after is the chance to belong, to show up in the credits, and to be seen to be capable of doing work at this level.  It's an important signal.  I can't tell you how many former students come back to tell me about what it's like applying to jobs when you already have a significant open source contribution history.  One story I love: a former student was in an interview with 3 senior dev leads, and one asked: "What kind of software have you worked on?"  She casually pointed to his monitor, which was running VSCode.  "That," she pointed.  Enough said.

A developer who has been doing this day-in and day-out can become blasé about the whole thing.  Facebook's devs land 100K commits a week, for example; so what's one more?  If it's your first, or represents a major step forward, I think it's important that it get treated with some care and respect.

To that end, I wanted to take a moment to point out to my colleagues the usefulness of GitHub's Co-authored-by commit message parsing.  In cases where you've somehow lost the original author info, this can allow you to still leave a note that more than one person worked on this.  All you need to do is structure your commit message like so:

Fixing bug xyz

Co-authored-by: name <name@example.com>
Co-authored-by: another-name <another-name@example.com>

In GitHub, the commit will show all the various people involved in the work. It's a small thing, but it's really easy to add, and in so doing, visibly welcome someone into your project and community.
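
From the command line, one way to get the blank line that GitHub's parser expects before the trailer is to pass -m twice, since git turns each -m into its own paragraph (the names and emails here are placeholders):

git commit -m "Fixing bug xyz" -m "Co-authored-by: name <name@example.com>"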

]]>
<![CDATA["I want to learn how to maintain software"]]>I wanted to pause for a moment in order to reflect on one of the more interesting things I heard from a student last week.  I often begin my open source classes by asking students why they are there.  What is "open source", I want to know, since most haven't

]]>
https://blog.humphd.org/i-want-to-learn-how-to-maintain-software/5c3e256d6b6baf39f0b1bbceTue, 15 Jan 2019 19:14:37 GMTI wanted to pause for a moment in order to reflect on one of the more interesting things I heard from a student last week.  I often begin my open source classes by asking students why they are there.  What is "open source", I want to know, since most haven't done it before, and I'm curious why they've chosen to learn it.  Often I'm told that people want to learn new technology, gain experience, add interesting projects to their resume, etc; all excellent answers.

But this time, one woman put up her hand and said something I don't usually hear: "I want to learn how to maintain software."  Amazing.  What a thing to be aware of while still in school.  My experience is that most students (and professors) haven't had to deal with programs outside the context of an assignment; and assignments are never maintained.  Get something working (anything!) and hand it in before the deadline.  That's how software's built, right?

Truth be told, a lot of it is.  In much the same way that we do when creating our built environment, the demands of software projects require us to borrow heavily, and incur large amounts of technical debt.  "If you're not embarrassed by your software, you waited too long to ship."  Catching up with that debt, iterating on your product, getting feedback from users, dealing with security issues, adding localization data, updating against shifting APIs, evolving regulation--software is never finished, and software can't possibly continue to work without being maintained.

This last point can be hard to understand if you don't work on software.  It's not hard to understand why, since we prefer buzzword-compliant and aspirational descriptions of technology to what it's really like.  Let's be honest, "smart TV" sounds a lot better in the marketing material than discussing how none of the software will work with existing services, or your new phone, in 3 years.  In 2019 all of our devices are now smart in a way they didn't seem to be 20 years ago.  Very little has changed with regard to how we build software in that same period.

I'm really fed up with the current fetish for INNOVATION, the worship of NEW, and the complete lack of honesty about the future and where this tech will be in 2 years.  We've been fooled by a conjurer's trick, and mistaken fast for smart: computers do things quickly, not intelligently.  Your computer, and all of its software, are incapable of adjusting to the changing environments in which they are used.  What they do, they do quickly.  But doing the wrong thing quickly is not smart.  Unless you point things in the right direction, moving fast doesn't get you where you need to go.  At this point I don't even need to give you examples; every one of us has been burned at some point already.

Keeping software tuned to the current demands of users, and the ecosystems in which it runs, is something we need to talk about more.  It doesn't happen by prefixing a product name with SMART.  Rather, we have to carefully cajole legacy software into working with new code.  It's hard to do well, and it's an ongoing need.

For my students learning open source, there exists an interesting opportunity to get their hands dirty working in new and old code simultaneously.  Another of my students this week told me how excited she was to work on the Apache Web Server, because she was so curious how you kept a project going across decades.  I understand what she sees here, and it's the same reason I keep coming back to the web and web browsers as my own source of inspiration.  Technology that doesn't vanish after being suddenly INNOVATED into existence has the potential to become core infrastructure, and change society.  To me, that's really exciting.

Working to maintain software is an incredible way to learn how both the old and the new work.  You end up having to understand both worlds, and it makes you a strong developer as a result.  "Surely someone has solved this already."  They have.  "Surely there's a better way than this."  There is.

Not all software needs to live forever.  There's a lot of code I'm happy to never have to use again.  The code that does last does so for reasons worth exploring.  Increasingly, that code is also open, and accessible to those who would come and get involved.  It has to be, because trying to maintain code for a long time is a team sport.  For example, I loved reading this week about the work in the Linux Kernel to prepare for the year-2038 bug.

If you want to learn about how to maintain software, get involved with open source software and learn how they keep the lights on year after year.  That's what I'll be doing this term.

On that note, I'm particularly interested in finding good projects for my students to work on that involve helping to maintain vs. create new tech.  There's been lots of talk of the need for this recently, and it is only going to increase.  I'd love to have better visibility into projects that need help doing this kind of work, beyond the "help needed" and "good first bug" labels everyone uses.  If you have a need, and want some people to join you in maintaining something, get in touch.  My students are looking for projects.

]]>
<![CDATA[Processing.js 2008-2018]]>Yesterday Pomax DM'ed me on Twitter to let me know he'd archived the Processing.js GitHub repo. He's been maintaining it mostly on his own for quite a while, and now with the amazing p5js project, there isn't really a need to keep it going.


I spent the rest of

]]>
https://blog.humphd.org/processing-js-2008/5c048f238ee42603d33e3a51Mon, 03 Dec 2018 04:12:00 GMTYesterday Pomax DM'ed me on Twitter to let me know he'd archived the Processing.js GitHub repo. He's been maintaining it mostly on his own for quite a while, and now with the amazing p5js project, there isn't really a need to keep it going.


I spent the rest of the day thinking back over the project, and reflecting on what it meant to me. Like everyone else in May 2008, I was in awe when John Resig wrote his famous reverse birthday present blog post, showing the world what he'd been hacking together:


I've decided to release one of my largest projects, in recent memory. Processing.js is the project that I've been alluding to for quite some time now. I've ported the Processing visualization language to JavaScript, using the Canvas element. I've been working on this project, off-and-on now, for the past 7 months.


It was nothing short of epic. I had followed the development of Processing since I was an undergrad. I remember stumbling into the aesthetics + computation group website at MIT in my first year, and becoming aware of the work of Ben Fry, John Maeda, Casey Reas and others. I was smitten. As a student studying both humanities and CS, I didn't know anyone else who loved computers and art, and here was an entire lab devoted to it. For many years thereafter, I followed along from afar, always amazed at the work people there were doing.


Then, in the fall of 2009, as part of my work with Mozilla, Chris Blizzard approached me about helping Al MacDonald (f1lt3r) to work on getting Processing.js to 1.0, and adding the missing 3D API via WebGL. In the lead-up to Firefox 3.7, Mozilla was interested in getting more canvas based tech on the web, and in finding performance and other bugs in canvas and WebGL. Processing.js, they thought, would help to bring a community of artists, designers, educators, and other visual coders to the web.


Was I interested!? Here was a chance to finally work alongside some of my technical heroes, and to get to contribute to a space I'd only ever looked at from the other side of the glass. "Yes, I'm interested." I remember getting my first email from Ben, who started to explain what Processing was--I didn't need any introductions.


That term I used Processing.js as the main open source project in my open source class. As Al and I worked on the code, I taught the students how things worked, and got them fixing small bugs. The code was not the easiest first web project for students: take a hybrid of Java and make it work, unmodified, in the browser, using DOM and canvas APIs. This was before transpilers, node, and the current JS ecosystem. If you want to learn the web though, there was no better way than to come at it from underneath like this.

I had an energetic group of students with a nice set of complementary skills. A few had been working with Vlad on 3D in the browser for a while, as he developed what would become WebGL. Andor Salga, Anna Sobiepanek, Daniel Hodgin, Scott Downe, Jon Buckley, and others would go on to continue working on it with me in our open source lab, CDOT.

Through 2009-11 we worked using the methods I'd learned from Mozilla: open bug tracker, irc, blogs, wikis, weekly community calls, regular dot-releases.


Because we were working in the open, and because the project had such an outsized reputation thanks to the intersections of "Ben & Casey" and Resig, all kinds of random (and amazing) people showed up in our irc channel. Every day someone new from the who's who of design, graphics, gaming, and the digital art worlds would pop in to show us a demo that had a bug, or to ask a question about how to make something work. I spent most of my time helping people debug things, and writing tests to put back into the project for performance issues, parser bugs, and API weirdness.

One day a musician and digital artist named Corban Brook showed up. He used Processing in his work, and was interested to help us fix some things he'd found while porting an old project. He never left. Over the months he helped us rewrite huge amounts of the code, taught us git, and became a big brother to many of the students. I learned a ton from him about git and JS.

Then there was the time this mathematician came into the channel, complaining about how poor our font code and bezier curve implementation were. It turned out he knew what he was talking about, and we never let him leave either. Pomax would go on to become one of the most important maintainers on the project, and a long time friend.

Another time an unknown nickname, "notmasteryet," appeared. He started submitting massive pull requests, but never really said anything. At one point he rewrote our entire Java-to-JavaScript parser from scratch and magically fixed hundreds of bugs we couldn't solve. "notmasteryet" turned out to be Yury Delendik, who would go on to join Mozilla and build every cool thing you've seen the web do in the past 10 years (pdf.js, shumway to name a few).


Being part of this eclectic mix of hackers and artists was intoxicating. Whatever skill one of you lacked, others in the group had it. At one point, the conversation moved toward how to use the browser to mix audio and visuals with processing.js. I had no idea how sound worked, but I did understand how to hack into Gecko and get the data, Corban was a master with FFTs, Al knew how to make the visuals work, and Yury knew everything the rest of us didn't.


We set out to see if we could connect all the dots, and began hacking on a new branch of our code that used a version of Firefox I modified to emit audio events. Our work would eventually be shipped in Firefox 4 as the Audio Data API, and lead to what is now the standardization of the Web Audio API. I still remember the first time we got all of our pieces working together in the browser, and Corban filmed it. Magic!


From there the group only got larger, and the ideas for processing.js more ambitious. With the addition of people like CJ and Bobby, we started building big demos for Mozilla, which doubled as massive performance tests for browsers trying to compete for speed with WebGL: Flight of the Navigator, No Comply. And these led to yet more browser APIs for gaming, like Pointer Lock and Gamepad.


Since then it's been amazing to watch all the places that processing.js has gone. Twitter has always been full of people discovering it, and sharing their work, not least because of John and Khan Academy using it in their curriculum. Years later, I even got to use it there with my own children to teach them to code.


I truly loved working on processing.js, probably more than any other project I've done in the past 10 years. It was my favourite kind of software to build for a few reasons:

  • we were implementing Ben's spec. All of our tests and decisions were based on "what does p5 do?" The freedom not to have to decide, but to simply execute, was liberating.
  • we had an enormous amount of pre-existing code to test, and slowly make work. There's no way I could have built processing.js from zero. But I love porting everyone's existing projects.
  • the project was totally based on tests: unit tests, performance tests, visual snapshot/ref tests, parser tests. I learned how to think about code in terms of tests by working on Mozilla, but I learned to love tests through processing.js.
  • it could be run without installing anything. Every time we made something new work, you just had to hit Refresh in your browser. That sounds so obvious, but for the community of Java devs coming to the web via processing.js, it was eye opening.
  • we could put time and attention into docs, examples, and guides. Casey and Ben had done so much of this, and we learned a lot from their approach and style.
  • it let me move up and down the web stack. I spent as much time working on performance issues in Firefox as I did in JavaScript. We found a ton of things in WebGL (I was even able to find and get a security bounty for a bug with TypedArrays). I remember once sitting with Boris Zbarsky in Boston, and having him teach me, slowly, how to figure out why our code was falling off of the JIT tracing, and how to fix it. Eventually we got back on JIT, thanks to bz :)

While it's definitely time for processing.js to be archived and other projects to take its place, I wanted to at least say a proper goodbye. I'm thankful I got to spend so many years working in the middle of it, and to have had the chance to work with such a creative part of the internet.


Thanks, too, to Pomax for keeping the lights on years after the rest of us had gone to other projects.


And to processing.js, goodnight. Thanks for all the unit tests.

]]>
<![CDATA[Observations on Hacktoberfest 2018]]>This term I'm teaching two sections of our Topics in Open Source Development course. The course aims to take upper-semester CS students into open source projects, and get them working on real-world software.


My usual approach is to put the entire class on the same large open source project. I

]]>
https://blog.humphd.org/observations-on-hacktoberfest-2018/5bde39098ee42603d33e3a4bSun, 04 Nov 2018 01:53:30 GMTThis term I'm teaching two sections of our Topics in Open Source Development course. The course aims to take upper-semester CS students into open source projects, and get them working on real-world software.


My usual approach is to put the entire class on the same large open source project. I like this method, because it means that students can help mentor each other, and we can form a shadow-community off to the side of the main project. Typically I've used Mozilla as a place to do this work.


However, this term I've been experimenting with having students work more freely within open source projects on GitHub in general. I also wanted to try encouraging students to work on Hacktoberfest as part of their work.


Hacktoberfest


Hacktoberfest is a yearly event sponsored by DigitalOcean, Twilio, and GitHub, which encourages new people to get involved in open source. Submit five pull requests on GitHub during October, get a T-Shirt and stickers. This year, many companies have joined in and offered to also give extra swag or prizes if people fix bugs in their repos (e.g., Microsoft).


Some of my students have done Hacktoberfest in the past and enjoyed it, so I thought I'd see what would happen if I got all my students involved. During the month of October, I asked my students to work on 1 pull request per week, and also to write a blog post about the experience, what they learned, what they fixed, and to share their thoughts.


Results


Now that October has ended, I wanted to share what I learned by having my students do this. During the month I worked with them to answer questions, support their problems with git and GitHub, explain build failures on Travis, intervene in the comments of pull requests, etc. Along the way I got to see what happens when a lot of new people suddenly get thrust into open source.


You can find all the PRs and blog posts here, but let me take you through some raw numbers and interesting facts:

  • 61 students began Hacktoberfest, and 91% were able to finish the required 5 PRs; three completed 6, and one student completed 10.
  • 307 Pull Requests were made to 180 repositories. As of Nov 1, 53% of these have already been merged.
  • 42,661 lines of code were added, and 10,387 lines deleted, in 3,465 files. Small changes add up.
  • The smallest PR was a fix for a single character, the largest added 10K lines by refactoring an iOS project to use a new Swift networking library (NOTE: there were a few really large PRs which I haven't included, because they were mostly generated files in the form of node_modules). Many PRs fixed bugs by simply deleting code. "Sir, are you sure this counts?" Yes, it most certainly does.

One of the things I was interested in seeing was which languages the students would choose to work in, when given the choice. As a result, I tried to keep track of the languages being used in PRs. In no particular order:

  • Rust
  • Swift
  • Scala
  • JavaScript
  • React
  • node.js
  • Markdown
  • PHP
  • Lua
  • Localization files (many types, many, many Natural languages)
  • JSON
  • C#
  • C++
  • Java
  • Go
  • Ruby
  • Python
  • HTML, CSS
  • Solidity (Ethereum)

I was also interested to see which projects the students would join. I'll discuss this more broadly below, but here are some of the more notable projects to which I saw the students submit fixes:

  • TravisCI
  • Microsoft VSCode
  • Mozilla Focus for iOS
  • Mozilla Addons (Frontend)
  • Brave for iOS
  • Handbrake
  • Ghost (blog platform)
  • Pandas
  • Keras
  • Jest
  • Monaco Editor
  • Microsoft (documentation for various projects)
  • Auth0
  • 30 Seconds of Code
  • Angular Material
  • Oh my zsh

A number of students did enough work in the project that they were asked to become collaborators. In two cases, the students were asked to take over the project and become maintainers! Careful what you touch.


Observations


I think the most valuable feedback comes from the student blogs, which I'll share quotes from below. Before I do that, let me share a few things that I observed and learned through this process.

  1. Because Hacktoberfest is a bit of a game, people game the system. I was surprised at the number of "Hacktoberfest-oriented" repos I saw. These were projects that were created specifically for Hacktoberfest in order to give people an easy way to contribute; or they were pre-existing, but also provided a way to get started I hadn't foreseen.

     I'll admit that I was not anticipating this sort of thing when I sent students out to work on open source projects. However, after reading their blog posts about the experience, I have come to the conclusion that for many people, just gaining experience with git, GitHub, and the mechanics of contribution, is valuable no matter the significance of the "code."

  2. For those students who focused too much on repos like those mentioned above, there was often a lot of frustration, since the "maintainers" were absent and the "community" was chaotic. People would steal issues from one another, merges would overwrite previous contributions, and there was little chance for personal growth. I saw many people tire of this treatment, and eventually decide, on their own, that they needed a better project. Eventually, the "easy" way became too crowded and untenable.

  3. Finding good bugs to work on continues to be hard. There are hundreds-of-thousands of bugs labeled "Hacktoberfest" on GitHub, but my students eventually gave up trying to use this label to find things (an example of this kind of search appears below this list). There were too many people trying to jump on the same bugs (~46K people participated in Hacktoberfest this year). I know that many of the students spent as much time looking for bugs as they did fixing them. This is an incredible statement, given that there are millions of open bugs on GitHub. The open source community in general needs to do a much better job connecting people with projects. Our current methods don't work. If you already know what you want to work on, then it's trivial. But if you are truly new (most of my students were), it's daunting. You should be able to match your skills to the kinds of work happening in repos, and then find things you can contribute toward. GitHub needs to do better here.

  4. A lot of my students speak more than one language, and wanted to work on localization. However, Hacktoberfest only counts work done in PRs toward your total. Most projects use third-party tools (e.g., Transifex) outside of GitHub for localization. I think we should do more to recognize localization as first-order contribution. If you go and translate a big part of a project or app, that should count toward your total too.

  5. Most of the students participated in three or more projects vs. focusing in just one. When you have as many students as I do all moving in and out of new projects, you come to realize how different open source projects are. For example, there are very few standards for how one "signs up" to work on something: some projects wanted an Issue; some wanted you to open a WIP PR with special tags; some wanted you to get permission; some wanted you to message a bot; etc. People (myself included) tend to work in their own projects, or within an ecosystem of projects, and don't realize how diverse and confusing this can be for new people. A project being on GitHub doesn't tell you that much about what the expectations are going to be, and simply having a CONTRIBUTING.md file isn't enough. Everyone has one, and they're all different!
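
For what it's worth, the kind of label search I mean looks like this in GitHub's issue search (an illustration, not any particular student's query):

is:issue is:open label:hacktoberfest language:javascript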

Quotes from Student Blogs: "What. A. Month!"


More than the code they wrote, I was interested in the reflections of the students as they moved from being beginners to feeling more and more confident. You can find links to all of the blog posts and PRs here. Here are some examples of what it was like for the students, in their own words.


Different approaches to getting started


Many students began by choosing projects using tech they already knew.


"I decided to play to my strengths, and look at projects that had issues recommended for beginners, as well as developed in python"


"Having enjoyed working with Go during my summer internship, I was like: Hey, let's look for a smaller project that uses Go!"


Others started with something unknown, new, and different.


"my goal for this month is to contribute to at least two projects that use Rust"


"I'm in a position where I have to learn C# for another project in another course, so the first thing I did was search up issues on Github that were labeled 'hacktoberfest' and uses the C# language."


"My first pull request was on a new language I have not worked with before called Ruby...I chose to work with Ruby because it was a chance to work with something that I have not worked with before."

I was fascinated to see people evolve over the five weeks, and some who were scared to venture out of familiar territory at first ended up in something completely new by the end.

"After I completed my pull request, I felt very proud of myself because I learned a new language which I did not think I was able to do at the beginning of this."


"I was able to learn a new programming language, Python, which was something I always wanted to do myself. If it wasn't for Hacktoberfest, I might keep procrastinating and never get started."


Overcoming Impostor Syndrome

Perhaps the single greatest effect of working on five PRs (vs. one or two) is that it helped to slowly convince people that, despite how they felt initially, they could in fact contribute, and did belong.

"Everyone posting here (on GitHub) is a genius, and it's very daunting."


"In the beginning, I am afraid to find issues that people post on GitHub."


"As I am looking at the C code I am daunted by the sheer size of some of the function, and how different functions are interacting with each other. This is surprisingly different from my experience with C++, or Object Oriented Programming in general."


Small successes bring greater confidence:


"Fixing the bug wasn't as easy as I thought it would be - the solution wasn't too straightforward - but with the help of the existing code as reference, I was able to find a solution. When working on a project at school, the instructions are laid out for you. You know which functions to work on, what they should do, and what they shouldn't do. Working on OSS was a completely different experience. School work is straightforward and because of that I'd be able to jump straight in and code. Being used to coding in this way made my first day trying to fix the bug very frustrating. After a day of trying to fix functions that looked as if they could be related to the bug without success, I decided to look at it again the next day. This time, I decided to take my time. I removed all of my edits to the code and spent around an hour just reading the code and drawing out the dependencies. This finally allowed me to find a solution."


"I learned that it's not going to be easy to contribute to a project right away, and that's okay...it's important to not get overwhelmed with what you're working on, and continue 1 step at a time, and ask questions when you need help. Be confident, if I can do it, you can too."


"I learned that an issue didn't need to be tagged as 'beginner friendly' for it to be viable for me, that I could and should take on more challenging issues at this point, ones that would feel more rewarding and worthwhile."


A Sense of Accomplishment


Many students talked about being "proud" of their work, or expressed surprise when a project they use personally accepted their code:


"I do feel genuinely proud that I have 10 lines of code landed in the VSCode project"


"After solving this issue I felt very proud. Not only did I contribute to a project I cared about, I was able to tackle an issue that I had almost no knowledge of going in, and was able to solve it without giving up. I had never worked with React before, and was scared that I would not be able to understand anything. In actuality it was mostly similar to JavaScript and I could follow along pretty well. I also think I did a good job of quickly finding the issue in the codebase and isolating the part of code where the issue was located."


"I used to think those projects are maintained by much more capable people than me, now the thinking is 'yeah, I can contribute to that.' To be honest, I would never think I was able to participate in an event like Hacktoberfest and contribute to the open-source community in this way. Now, it is so rewarding to see my name amount the contributors."


Joining the Larger Open Source Community


Finally, a theme I saw again and again was students beginning to find their place within the larger, global, open source community. Many students had their blogs quoted or featured on social media, and were surprised that other people around the world had seen them and their work.


"I really liked how people from different backgrounds and ethnicity can help one another when it comes to diversifying their code with different cultures. I really like the fact I got to share my language (Punjabi) with other people in the community through localizing"


"getting to work with developers in St.Petersburg, Holland, Barcelona and America."


"Now I can say I have worked with people from around the world!"


Conclusion


I'm really impressed with Hacktoberfest, and thankful for DigitalOcean, Twilio, GitHub and others who take it upon themselves to sponsor this. We need events like this where everyone is aware that new people are joining, and it's OK to get involved. Having a sense that "during October, lots of new people are going to be coming" is important. Projects can label issues, people can approach reviews a bit differently, and everyone can use a bit more patience and support. And there's no need for any of that to end now that it's November.


Hopefully this helps you if you're thinking about having your students join Hacktoberfest next year. If you have other questions, get in touch. And if you'd like to help me grade all this work, I'd be happy for that contribution :)

]]>
<![CDATA[Building Large Code on Travis CI]]>This week I was doing an experiment to see if I could automate a build step in a project I'm working on, which requires binary resources to be included in a web app.


I'm building a custom Linux kernel and bundling it with a root filesystem in order to embed

]]>
https://blog.humphd.org/building-large-code-on-travis/5b29074f8ee42603d33e3a3eTue, 19 Jun 2018 14:45:06 GMTThis week I was doing an experiment to see if I could automate a build step in a project I'm working on, which requires binary resources to be included in a web app.


I'm building a custom Linux kernel and bundling it with a root filesystem in order to embed it in the browser. To do this, I'm using a dockerized Buildroot build environment (I'll write about the details of this in a follow-up post). On my various computers, this takes anywhere from 15-25 minutes. Since my buildroot/kernel configs won't change very often, I wondered if I could move this to Travis and automate it away from our workflow.


Travis has no problem using docker, and as long as you can fit your build into the allotted 50 minute build timeout window, it should work. Let's do this!


First attempt


In the simplest case, doing a build like this would be as simple as:

sudo: required
services:
  - docker
...
before_script:
  - docker build -t buildroot .
  - docker run --rm -v $PWD/build:/build buildroot
...
deploy:
  # Deploy built binaries in /build along with other assets

This happily builds my docker buildroot image, and then starts the build within the container, logging everything as it goes. But once the log gets to 10,000 lines in length, Travis won't produce more output. You can still download the Raw Log as a file, so I wait a bit and then periodically download a snapshot of the log in order to check on the build's progress.


At a certain point the build is terminated: once the log file grows to 4M, Travis assumes that all the size is noise, for example, a command running in an infinite loop, and terminates the build with an error.


Second attempt


It's clear that I need to reduce the output of my build. This time I redirect build output to a log file, and then tell Travis to dump the tail-end of the log file in the case of a failed build. The after_failure and after_success build stage hooks are perfect for this:

before_script:
  - docker build -t buildroot . > build.log 2>&1
  - docker run --rm -v $PWD/build:/build buildroot >> build.log 2>&1

after_failure:
  # dump the last 2000 lines of our build, and hope the error is in that!
  - tail --lines=2000 build.log

after_success:
  # Log that the build worked, because we all need some good news
  - echo "Buildroot build succeeded, binary in ./build"

I'm pretty proud of this until it fails after 10 minutes of building with an error about Travis assuming the lack of log messages (which are all going to my build.log file) means my build has stalled and should be terminated. Turns out you must produce console output every 10 minutes to keep Travis builds alive.


Third attempt


Not only is this a common problem, Travis has a built-in solution in the form of travis_wait. Essentially, you can prefix your build command with travis_wait and it will tolerate there being no output for 20 minutes. Need more than 20? You can optionally pass it the number of minutes to wait before timing out. Let's try 30 minutes:

before_script:
  - docker build -t buildroot . > build.log 2>&1
  - travis_wait 30 docker run --rm -v $PWD/build:/build buildroot >> build.log 2>&1

This builds perfectly...for 10 minutes. Then it dies with a timeout due to there being no console output. Some more research reveals that travis_wait doesn't play nicely with processes that fork or exec.


Fourth attempt


Lots of people suggest variations on the same theme: run a command that spins and periodically prints something to stdout, and have it fork your build process:

before_script:
  - docker build -t buildroot . > build.log 2>&1
  - while sleep 5m; do echo "=====[ $SECONDS seconds, buildroot still building... ]====="; done &
  - time docker run --rm -v $PWD/build:/build buildroot >> build.log 2>&1
  # Killing background sleep loop
  - kill %1

Here we log something at 5 minute intervals, while the build progresses in the background. When it's done, we kill the while loop. This works perfectly...until it hits the 50 minute barrier and gets killed by Travis:

$ docker build -t buildroot . > build.log 2>&1
before_script
$ while sleep 5m; do echo "=====[ $SECONDS seconds, buildroot still building... ]====="; done &
$ time docker run --rm -v $PWD/build:/build buildroot >> build.log 2>&1
=====[ 495 seconds, buildroot still building... ]=====
=====[ 795 seconds, buildroot still building... ]=====
=====[ 1095 seconds, buildroot still building... ]=====
=====[ 1395 seconds, buildroot still building... ]=====
=====[ 1695 seconds, buildroot still building... ]=====
=====[ 1995 seconds, buildroot still building... ]=====
=====[ 2295 seconds, buildroot still building... ]=====
=====[ 2595 seconds, buildroot still building... ]=====
=====[ 2895 seconds, buildroot still building... ]=====
The job exceeded the maximum time limit for jobs, and has been terminated.

The build took over 48 minutes on the Travis builder, and combined with the time I'd already spent cloning, installing, etc. there isn't enough time to do what I'd hoped.


Part of me wonders whether I could hack something together that uses successive builds and Travis caches to move the build artifacts out of docker, such that I can do incremental builds and leverage ccache and the like. I'm sure someone has done it, and it's in a .travis.yml file in GitHub somewhere already. I leave this as an experiment for the reader.
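
If you want to try, the building blocks would be Travis's directory caching plus Buildroot's own download and compiler caches (a sketch of the idea only; the paths are assumptions based on my setup, and I haven't verified this end-to-end):

cache:
  directories:
    - $HOME/.ccache  # compiler cache survives between builds
    - $HOME/dl       # Buildroot download cache (point BR2_DL_DIR here)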


I've got nothing but love for Travis and the incredible free service they offer open source projects. Every time I concoct some new use case, I find that they've added it or supported it all along. The Travis docs are incredible, and well worth your time if you want to push the service in interesting directions.


In this case I've hit a wall and will go another way. But I learned a bunch and in case it will help someone else, I leave it here for your CI needs.

]]>
<![CDATA[Experiments with "Good First Experience"]]>Since writing my post about good-first-bug vs. good-first-experience I've been experimenting with different approaches to creating useful walkthroughs. I think I've settled on a method that works well, and wanted to write about it, so as to encourage others to do the same.


First, a refresher on what I mean

]]>
https://blog.humphd.org/experiments-with-good-first-experience/5aba4f377bee6a03d4f54208Tue, 27 Mar 2018 15:02:07 GMTSince writing my post about good-first-bug vs. good-first-experience I've been experimenting with different approaches to creating useful walkthroughs. I think I've settled on a method that works well, and wanted to write about it, so as to encourage others to do the same.


First, a refresher on what I mean by Good First Experience (GFE). Unlike a Good First Bug (GFB), which can only be fixed by one person (i.e., it's destroyed in being solved), a GFE is reproducible by anyone and everyone willing to put in the time. As such, a GFE is not tied to the current state of the project (i.e., rapidly changing), but rather uses an old commit to freeze the state of the project so it can be recreated. Following the steps doesn't alter the project; it alters you.


If we think of an OSS project like a team of climbers ascending a mountain, a GFE is a camp part-way up the route that backpackers can visit in order to get a feel for the real thing. A GFE is also like a good detective novel: you know the mystery is going to get solved by the end, but nevertheless, it's thrilling to experience the journey, and see how it happens. Could I solve this before the book does?


With my open source students, I've tried a mix of written and in-class presentation style. My approach is usually to fix a bug in a project I don't know and document what I do. I think it's useful to put yourself on an even footing with a new developer by working in unfamiliar code. Doing so forces me to be more deliberate with how I use tools and debugging/code-reading techniques. It also means I (mostly) can't rely on years of accumulated knowledge about how code is put together, and instead have to face the challenge fresh. Obviously I can't leave my experience out of the mix, because I've been programming for over 30 years. But I can remove familiarity, which is what a lot of us rely on without realizing it. Try fixing a bug in a project and language you've never worked on before, and you'll be surprised at what you learn about yourself. When I need to humble myself, a few hours with a CSS bug is usually all I need :)


My first attempts at this involved writing a blog post, and I've done a few of these.

I enjoy this style. It's easy for me to write in my blog. However, I've moved away from it for a number of reasons. First, I don't like how it recedes into the past by being tied to the history of my blog. It's important what I wrote, not when I wrote it. Instead of a journal entry, I want this to feel more like documentation. Another thing I don't like about using my blog is that it ends up being disconnected from the project and code in question. There's an unnecessary separation between the experience and the code, one that I think encourages you to read but not do anything.


I've since started using another style: hijacking the project's README.md file and writing everything in a branch on my fork.

I got the idea for this approach when I wrote my guide on Machine Learning and Caffe. To do this, I needed a combination of documentation, source files, and images. Obviously my blog wouldn't suffice, so I did it as its own repo. I'd seen lots of people "blog" using Gist before (e.g., this Makefile tutorial), and I was curious to know what would happen if I repurposed an entire repo as a writing medium.


In the case of my ML guide, it's meant a ton of exposure (4.5K stars and weeks as a top trending repo), and nearly 500 forks. It's also formed its own community, with people filing bugs and still other people helping solve them. It also resulted in a complete Chinese translation, and thereby yet more exposure.


Knowing that I could use GitHub in this way, I was interested to try an even more symbiotic approach for my GFE guides:

  • Fork the repo in question
  • Create a new branch, and freeze the project state so it is reproducible by others
  • Add a screenshots/ directory for all the images I need to include. Now I can just git add and git commit these into the repo
  • Erase the README.md file contents, and start writing my guide in there.
  • Link to files within the project, pinned to the commit that I'm on in this branch
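
In raw git terms, the recipe looks roughly like this (a sketch; the repo, branch name, and commit are placeholders):

git clone https://github.com/me/some-project.git
cd some-project
git checkout -b gfe-walkthrough abc1234   # freeze the project at a known commit
mkdir screenshots                         # images get committed alongside the guide
# ...erase README.md, write the walkthrough, capture screenshots...
git add README.md screenshots
git commit -m "Add Good First Experience walkthrough"
git push origin gfe-walkthrough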

I've liked a number of things that this approach provides:

  • As an outsider, you can contribute something to a project you like that will help others get involved. I don't have to convince the project to do this. I just do it.
  • Similarly, I can do something in the project without having to get permission. I'm not asking any of the projects to make this official documentation. Whether they do or don't, it exists on GitHub identically.
  • People (my students, or others) who want to try what I'm doing can simply clone my fork, and checkout my branch. All the files are there in exactly the right state. Everything should work as it did for me. There are obviously going to be issues with environments and dependency versions I can't control as easily.
  • People can interact with what I've done to suggest corrections, file issues (just enable Issues on your fork), star the repo, etc.

I taught my Brave walkthrough in class yesterday, and I think it was ideally suited to the time I had (2 hours) and the level of the students. Many times in the past I have fixed a bug live in class, but without producing a guide that could be used after the class ended. By doing both, I've found that students can watch me do it live, and we can discuss lots of things as they happen; then, after class, they can read through it again and ideally try it themselves, to further develop the skills.

-

This approach is something new that I'm enjoying, and I wanted to share it as a possible style for others to try. If you have projects you think I should do this on, let me know. I'm definitely going to try to do more of this.

-]]>
<![CDATA[On standards work]]>This week I'm looking at standards with my open source class. I find that students often don't know about standards and specs, how to read them, how they get created, or how to give feedback and participate. The process is largely invisible. The timing of this topic corresponds to a

]]>
https://blog.humphd.org/on-standards-work/5aa036377bee6a03d4f541ffWed, 07 Mar 2018 20:18:43 GMTThis week I'm looking at standards with my open source class. I find that students often don't know about standards and specs, how to read them, how they get created, or how to give feedback and participate. The process is largely invisible. The timing of this topic corresponds to a visit from David Bruant, who is a guest presenter in the class this week. I wanted to discuss his background working "open" while he was here, and one of the areas he's focused on is open standards work for the web, in particular, for JavaScript.

-

All of the students are using JavaScript. Where did it come from? Who made it? Who maintains it? Who defines it? Who is in charge? When we talk about open source we think about code, tests, documentation, and how all of these evolve. But what about open standards? What does working on a standard look like?

-

There's a great example being discussed this week all over Twitter, GitHub, Bugzilla and elsewhere. It involves a proposal to add a new method flatten() to Arrays. There are some good docs for it on MDN as well.

-

The basic idea is to allow an Array containing other Arrays, or "holes" (i.e., empty elements), to be compressed into a new "flat" Array. For example, the "flattened" version of [1, 2, [3, 4]] would be [1, 2, 3, 4]. It's a great suggestion, and one of many innovative and useful things that have been added to Array in the last few years.
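
To make this concrete, here is a quick sketch of the proposed behaviour. (For context: the method was eventually standardized under a different name, Array.prototype.flat, with the same semantics, so these examples run in a modern engine as written.)

    // Proposed Array.prototype.flatten() behaviour, shown via its
    // eventual standardized name, Array.prototype.flat:
    [1, 2, [3, 4]].flat();        // => [1, 2, 3, 4]
    [1, , 3].flat();              // => [1, 3]      (holes are dropped)
    [1, [2, [3]]].flat();         // => [1, 2, [3]] (one level by default)
    [1, [2, [3]]].flat(Infinity); // => [1, 2, 3]   (flatten everything)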

-

However, changing the web is hard. There's just so much of it being used (and abused) by people all over the world in unexpected ways. You might have a good idea for a new thing the web and JavaScript can do, but getting it added is not easy. You might say to yourself, "I can see how removing things would be hard, but why is adding something difficult?" It's difficult because one of the goals of the people who look after web standards is to avoid breaking the existing web. Where possible, something authored for the web of 1999 should still work in 2019.

-

So how does flatten() break the web? Our story starts 150 years ago, back in the mid-1990s. When it arrived on the scene, JavaScript was fairly small and limited. However, people used it, loved it (and hated it), and their practical uses of it began to wear grooves: as people wrote more and more code, best practices emerged, and some of those calcified into utility functions, libraries, and frameworks.

-

One of those frameworks was MooTools. Among other conveniences, MooTools added a way to flatten() an Array. While JavaScript couldn't do this "natively," it was possible to "shim" or "polyfill" the built-in Array type to add new properties and methods. MooTools did this in a way that causes problems: we have all been told that it's a bad idea to modify, "step on," or otherwise alter the definitions of the language and runtime without first checking to see if they are, in fact, already available. We wrote code in 2007 using assumptions that don't necessarily hold true in 2018: the browsers change, versions change, the language changes.
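
As a hypothetical illustration of the two shimming styles (this is a sketch, not MooTools' actual source):

    // Careless shim: define the method unconditionally, stepping on any
    // native version the engine ships now or in the future:
    Array.prototype.flatten = function () {
      // library-specific flattening semantics live here
    };

    // Defensive shim: only fill the gap if the method doesn't exist yet:
    if (!('flatten' in Array.prototype)) {
      Array.prototype.flatten = function () {
        // ...
      };
    }

Even the defensive version isn't bulletproof: code written against the shim's semantics can still break when a native method with the same name but different behaviour shows up.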

-

I remember when I started writing JavaScript and looking at the long list of reserved keywords I wasn't supposed to use: things like class, which everyone knew you'd be safe to use anyway, since JS doesn't use classes! Well, much like hundred-year land leases, eventually things change, and what was true once upon a time doesn't necessarily hold today. It's easy to point a finger at MooTools (many people are), but honestly, none of us thinks with enough long-term vision to truly understand all the implications of our decisions now on the world 10, 20, or 50 years hence. Code people are writing today will still be in use by then, I promise you; I was a developer during Y2K, so I know it's true!

-

At any rate, MooTools' flatten() is going to collide with JavaScript's new flatten(), because they don't work exactly the same way (i.e., they have different argument signatures), and any code that relies on MooTools' way of doing flatten() will get...flattened.
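
Roughly, the collision looks like this (simplified, and assuming MooTools' documented zero-argument, fully recursive flatten(); this is not the library's actual source):

    var nested = [1, [2, [3]]];

    // MooTools-style flatten(): no arguments, recurses all the way down:
    nested.flatten();  // => [1, 2, 3] with the library's shim active

    // Proposed native flatten(depth = 1): one level by default:
    nested.flatten();  // => [1, 2, [3]] under the proposal

    // Same call, different result: code that relied on the shim's
    // behaviour silently changes meaning once the native method wins.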

-

And so, someone files a bug on the proposal, suggesting that flatten() be changed to smoosh(). Before this gets resolved, imagine you have to make the decision. What would you do? Is "smoosh" logical? Maybe you'd say "smoosh" is silly and instead suggest "press" or "mix". Are those safe choices? What if you used a made-up word and just documented it? What about "clarmp"? What about using a word from another language? They say that naming things is one of the great problems in computer science, and it really is a hard problem! On some level we really should have lexicographers sitting on these committees to help us sort things out.

-

I won't give you my opinion. I intentionally stay out of a lot of these debates because I don't feel qualified to make good decisions, nor do I feel like it matters what I think. I have ideas, possibly good ideas, but the scope and scale of the web is frightening. I've had the privilege to work with some amazing web standards people in the past, and the things they know blow my mind. Every choice is fraught, and every solution is a compromise. It's one of the reasons why I'm so patient with standards bodies and implementors, who try their best and yet still make mistakes.

-

One thing I do know for sure is that the alternative, where one person or company makes all the decisions, where old code gets trampled and forgotten by progress, where we only care about what's new, is a world that I don't want either. If I have to smoosh() in order to live on a web that's bigger than me and my preferences, I'm OK with that.

-

It's easy to laugh, but instead I think we should really be thanking the invisible, hard-working, well-intentioned open standards people who do amazing work to both advance the front and guard the flank.

-]]>
\ No newline at end of file diff --git a/test/test_files/humphd-yt-channel.xml b/test/test_files/humphd-yt-channel.xml deleted file mode 100644 index de3184fb7b..0000000000 --- a/test/test_files/humphd-yt-channel.xml +++ /dev/null @@ -1,360 +0,0 @@ - - - - yt:channel:UCqaMbMDf01BLttof1lHAo2A - UCqaMbMDf01BLttof1lHAo2A - David Humphrey - - - David Humphrey - https://www.youtube.com/channel/UCqaMbMDf01BLttof1lHAo2A - - 2015-07-11T20:00:29+00:00 - - yt:video:mNuHA7vH6Wc - mNuHA7vH6Wc - UCqaMbMDf01BLttof1lHAo2A - DPS909 OSD600 Week 03 - Fixing a Bug in the Azure JS SDK - - - David Humphrey - https://www.youtube.com/channel/UCqaMbMDf01BLttof1lHAo2A - - 2021-09-24T19:04:25+00:00 - 2021-10-27T10:46:10+00:00 - - DPS909 OSD600 Week 03 - Fixing a Bug in the Azure JS SDK - - - Walkthrough and discussion of fixing a bug in https://github.com/Azure/azure-sdk-for-js. Issue at https://github.com/Azure/azure-sdk-for-js/issues/15772. PR at https://github.com/Azure/azure-sdk-for-js/pull/17820. - - - - - - - - yt:video:GUXjyPp433M - GUXjyPp433M - UCqaMbMDf01BLttof1lHAo2A - DPS909/OSD600 Fall 2021 Week 01 Part 2 - Week 1 Overview - - - David Humphrey - https://www.youtube.com/channel/UCqaMbMDf01BLttof1lHAo2A - - 2021-09-06T12:00:06+00:00 - 2021-09-16T13:20:23+00:00 - - DPS909/OSD600 Fall 2021 Week 01 Part 2 - Week 1 Overview - - - Overview of https://github.com/Seneca-CDOT/topics-in-open-source-2021 - - - - - - - - yt:video:rOBX6vRch7U - rOBX6vRch7U - UCqaMbMDf01BLttof1lHAo2A - WEB422 Week 12 WebSockets Part 1 - Intro and Writing the Server - - - David Humphrey - https://www.youtube.com/channel/UCqaMbMDf01BLttof1lHAo2A - - 2021-04-04T13:00:30+00:00 - 2021-11-06T04:28:19+00:00 - - WEB422 Week 12 WebSockets Part 1 - Intro and Writing the Server - - - Introduction to Web Sockets and writing a Web Socket Server for an Emoji Chat App. Code at https://github.com/humphd/web422-week12 - - - - - - - - yt:video:lU9db7Dd95I - lU9db7Dd95I - UCqaMbMDf01BLttof1lHAo2A - WEB422 Week 12 WebSockets Part 2 - Writing the React Front-End - - - David Humphrey - https://www.youtube.com/channel/UCqaMbMDf01BLttof1lHAo2A - - 2021-04-04T13:00:30+00:00 - 2021-04-05T23:01:11+00:00 - - WEB422 Week 12 WebSockets Part 2 - Writing the React Front-End - - - Creating the React Front-End for our Emoji Chat App. Code at https://github.com/humphd/web422-week12 - - - - - - - - yt:video:motlCbDr6c4 - motlCbDr6c4 - UCqaMbMDf01BLttof1lHAo2A - WEB422 Week 05 Part 4 - Deploying React Apps - - - David Humphrey - https://www.youtube.com/channel/UCqaMbMDf01BLttof1lHAo2A - - 2021-02-07T14:00:13+00:00 - 2021-03-11T03:07:39+00:00 - - WEB422 Week 05 Part 4 - Deploying React Apps - - - Deploying a Create-React-App (https://create-react-app.dev/docs/deployment) to various services: GitHub Pages (https://pages.github.com/), Vercel (https://vercel.com/), Netlify (https://www.netlify.com/), Begin (https://begin.com/). 
- -Example deploy of (https://github.com/humphd/web422-week-05-artsy) to Vercel https://web422-week-05-artsy.vercel.app/ and GitHub Pages https://humphd.github.io/web422-week-05-artsy/build/ - - - - - - - - yt:video:TaSAzbTltUo - TaSAzbTltUo - UCqaMbMDf01BLttof1lHAo2A - WEB422 Week 05 Part 3 - Using Formik, Joi, and Yup with React - - - David Humphrey - https://www.youtube.com/channel/UCqaMbMDf01BLttof1lHAo2A - - 2021-02-07T14:00:07+00:00 - 2021-08-19T07:07:52+00:00 - - WEB422 Week 05 Part 3 - Using Formik, Joi, and Yup with React - - - Example of working with Form data in the browser with Formik, and server using Express, and how to validate the data using Joi and Yup on both ends. Code available at https://github.com/humphd/web422-week05-formik. - - - - - - - - yt:video:NPf-Y2Ek6a4 - NPf-Y2Ek6a4 - UCqaMbMDf01BLttof1lHAo2A - WEB422 Week 05 Part 1 - Intro to React Forms - - - David Humphrey - https://www.youtube.com/channel/UCqaMbMDf01BLttof1lHAo2A - - 2021-02-07T14:00:06+00:00 - 2021-05-08T09:50:22+00:00 - - WEB422 Week 05 Part 1 - Intro to React Forms - - - Introduction to React Forms, discussion of code in notes https://web422.ca/notes/react-forms. See also the official React docs on forms https://reactjs.org/docs/forms.html - - - - - - - - yt:video:5i10iPnrfmw - 5i10iPnrfmw - UCqaMbMDf01BLttof1lHAo2A - WEB422 Week 05 Part 2 - Using a Form to Load API Data - - - David Humphrey - https://www.youtube.com/channel/UCqaMbMDf01BLttof1lHAo2A - - 2021-02-07T14:00:01+00:00 - 2021-11-03T23:24:37+00:00 - - WEB422 Week 05 Part 2 - Using a Form to Load API Data - - - Building an interactive Form in React that uses the Art Institute of Chicago REST API (http://api.artic.edu/docs/). Code available at https://github.com/humphd/web422-week-05-artsy - - - - - - - - yt:video:zcJA5YBzJG8 - zcJA5YBzJG8 - UCqaMbMDf01BLttof1lHAo2A - WEB422 Week 04 Part 3 - Using Third Party React Components for Routing and UI - - - David Humphrey - https://www.youtube.com/channel/UCqaMbMDf01BLttof1lHAo2A - - 2021-01-31T14:00:30+00:00 - 2021-03-11T03:10:12+00:00 - - WEB422 Week 04 Part 3 - Using Third Party React Components for Routing and UI - - - Discussion of React Routing using React Router (https://reactrouter.com/web) and how we can use 3rd Party React Components like https://react-bootstrap.github.io/ - - - - - - - - yt:video:hgew3p5RriY - hgew3p5RriY - UCqaMbMDf01BLttof1lHAo2A - WEB422 Week 04 Part 1 - React Events - - - David Humphrey - https://www.youtube.com/channel/UCqaMbMDf01BLttof1lHAo2A - - 2021-01-31T14:00:12+00:00 - 2021-03-11T04:53:59+00:00 - - WEB422 Week 04 Part 1 - React Events - - - Discussion of React Events and how they related to DOM Events (notes: https://web422.ca/notes/react-events-and-data). Code from video available at https://gist.github.com/humphd/e353ab107e561c496bf9eec78fa8cac4 - - - - - - - - yt:video:dDMgZ7TfPaI - dDMgZ7TfPaI - UCqaMbMDf01BLttof1lHAo2A - WEB422 Week 04 Part 4 - GitHub API Example - - - David Humphrey - https://www.youtube.com/channel/UCqaMbMDf01BLttof1lHAo2A - - 2021-01-31T14:00:09+00:00 - 2021-03-11T04:50:40+00:00 - - WEB422 Week 04 Part 4 - GitHub API Example - - - GitHub API Example with React Router, Bootstrap React, and useSWR 3rd party Components. Code available at https://github.com/humphd/web422-week04-github-example. 
- - - - - - - - yt:video:WdT_coWe4ms - WdT_coWe4ms - UCqaMbMDf01BLttof1lHAo2A - WEB422 Week 04 Part 2 - React Events and Data Loading - - - David Humphrey - https://www.youtube.com/channel/UCqaMbMDf01BLttof1lHAo2A - - 2021-01-31T14:00:06+00:00 - 2021-10-23T12:22:30+00:00 - - WEB422 Week 04 Part 2 - React Events and Data Loading - - - Working with Events, Data Loading, and Conditional Rendering in React. Code available at https://github.com/humphd/web422-week04-events-data-loading - - - - - - - - yt:video:rx4KuxqD3CA - rx4KuxqD3CA - UCqaMbMDf01BLttof1lHAo2A - WEB422 Week 03 Part 2 - Modern JavaScript in React - - - David Humphrey - https://www.youtube.com/channel/UCqaMbMDf01BLttof1lHAo2A - - 2021-01-24T14:00:31+00:00 - 2021-09-12T04:43:29+00:00 - - WEB422 Week 03 Part 2 - Modern JavaScript in React - - - Discussion of some of the newer syntax in JavaScript that will be used extensively in React. See https://reactjs.org/docs/getting-started.html#javascript-resources and https://developer.mozilla.org/en-US/docs/Web/JavaScript/A_re-introduction_to_JavaScript and - - - - - - - - yt:video:wC0TOPzrLTI - wC0TOPzrLTI - UCqaMbMDf01BLttof1lHAo2A - WEB422 Week 03 Part 1 - React Intro - - - David Humphrey - https://www.youtube.com/channel/UCqaMbMDf01BLttof1lHAo2A - - 2021-01-24T14:00:13+00:00 - 2021-01-24T14:00:13+00:00 - - WEB422 Week 03 Part 1 - React Intro - - - Intro to React. Discussion of some of the main ideas and philosophies of building web applications with React. - - - - - - - - yt:video:n1A52kPfPtI - n1A52kPfPtI - UCqaMbMDf01BLttof1lHAo2A - WEB422 Week 03 Part 4 - Writing an App with React - - - David Humphrey - https://www.youtube.com/channel/UCqaMbMDf01BLttof1lHAo2A - - 2021-01-24T14:00:12+00:00 - 2021-01-28T21:28:06+00:00 - - WEB422 Week 03 Part 4 - Writing an App with React - - - Rewrite of the Week 1 reqres.in API front-end (https://github.com/humphd/web422-week01) in React. Code available at https://github.com/humphd/web422-week03-react. Live version at https://humphd.github.io/web422-week03-react/dist/ - - - - - - - \ No newline at end of file diff --git a/test/test_files/inactive-blog-filter.test-active-feeds.txt b/test/test_files/inactive-blog-filter.test-active-feeds.txt deleted file mode 100644 index 53f19cd912..0000000000 --- a/test/test_files/inactive-blog-filter.test-active-feeds.txt +++ /dev/null @@ -1 +0,0 @@ -https://blog.humphd.org/ diff --git a/test/test_files/inactive-blog-filter.test-dead-feeds.txt b/test/test_files/inactive-blog-filter.test-dead-feeds.txt deleted file mode 100644 index 6c4cd7fe52..0000000000 --- a/test/test_files/inactive-blog-filter.test-dead-feeds.txt +++ /dev/null @@ -1 +0,0 @@ -http://KrazyDre.blogspot.com/feeds/posts/default?alt=rss diff --git a/test/test_files/inactive-blog-filter.test-inactive-feeds.txt b/test/test_files/inactive-blog-filter.test-inactive-feeds.txt deleted file mode 100644 index fe139cee28..0000000000 --- a/test/test_files/inactive-blog-filter.test-inactive-feeds.txt +++ /dev/null @@ -1 +0,0 @@ -http://ajhooper.blogspot.com/feeds/posts/default diff --git a/test/text-parser.test.js b/test/text-parser.test.js deleted file mode 100644 index ccb4e20d29..0000000000 --- a/test/text-parser.test.js +++ /dev/null @@ -1,49 +0,0 @@ -const textParser = require('../src/backend/utils/text-parser'); - -/** - * textParser() will convert HTML fragments to plain text. - */ -describe('text-parser tests', function () { - test('textParser() with doctype', () => { - const result = textParser('

Hello World

'); - expect(result).toBe('Hello World'); - }); - - test('textParser() with HTML body', () => { - const result = textParser( - '

Seneca

OpenSource Telescope
' - ); - // NOTE: we don't have a great way to deal with whitespace like this. - expect(result).toBe('SenecaOpenSource Telescope'); - }); - - test('textParser() with minimal document fragment', () => { - const result = textParser('

OSD600

'); - expect(result).toBe('OSD600'); - }); - - test('textParser() with real blog post', () => { - // Real blog post from https://grommers.wordpress.com/ - const html = `

Often we go on adventures in real-life, and its pretty straight forward, you go to a location, you ask the information kiosks, and you have a very human interaction and can ask just about any question you can think of, and they’ll either point you in the right direction (if they are a decent human being, and not someone who sends you wandering aimlessly.) or get a hold of someone who can.

- -

Now this is the first year I’ve started using git extensively, I’ve run into multiple problems much like other first time programmers. The biggest hurdle though for many of us, is confidence. Am I good enough to tackle this problem? I don’t have the answer to these problems…let me find another one!

This is the problem many beginner coder’s go through in there mind, and trust me when I say you’re not alone. It’s intimidating, it’s stressful, and it’s very easy to just put your hands up and walk away.

I don’t ever suggest doing that, and the reason why? EVERYONE feels the same feelings. So how did I begin?

- -

Well luckily enough, my professor actually forced us to get 3 issues to begin, showed us the ins and outs of searching through Github (pretty straight forward but click here if you want to look into it further.)

Link #1 – So, the first of the three I went too actually thought had to do about cheese. It was talking about a cheese wheel. I thought hey! I like cheese! Sadly, it has very little to do with cheese, but documentation. I felt that this would be a good start for me, as it will get me some experience documenting and at least it will challenge my ability to understand something. If I can write it clean and concise to another user who knows it better then I do, I think I’ve done a good job. Fairly simple, and I think its a good base to work off of!

- -

Link #2 – This will most like before my first attempt at coding something for someone else without a rubric and just my own knowledge. Discord is a big platform right now for the gaming community, and I love my gaming communities. This is a relatively simple fix, but I think can help me understand the basic foundations of how discord modding works. This will in turn give me drive to look at this code and be able to improve my own servers on these things.

- -

Link #3 – Reading more into passport.js, and actually how these logins work, I felt this is something I knew quite a bit about, but how to implement it in a different manner, and in someone else code I felt was a good thing to build off something I already knew. This will be the third issue that I’m going to try and fix.

All in all, this will be my first adventure, and hopefully not my last into the github world! If this is your first time, know I was there once as well, and as Ms. Frizzle says “Take chances, make mistakes, get messy!”

-`; - const parsed = `Often we go on adventures in real-life, and its pretty straight forward, you go to a location, you ask the information kiosks, and you have a very human interaction and can ask just about any question you can think of, and they’ll either point you in the right direction (if they are a decent human being, and not someone who sends you wandering aimlessly.) or get a hold of someone who can. - - Now this is the first year I’ve started using git extensively, I’ve run into multiple problems much like other first time programmers. The biggest hurdle though for many of us, is confidence. Am I good enough to tackle this problem? I don’t have the answer to these problems…let me find another one!This is the problem many beginner coder’s go through in there mind, and trust me when I say you’re not alone. It’s intimidating, it’s stressful, and it’s very easy to just put your hands up and walk away. I don’t ever suggest doing that, and the reason why? EVERYONE feels the same feelings. So how did I begin? - - Well luckily enough, my professor actually forced us to get 3 issues to begin, showed us the ins and outs of searching through Github (pretty straight forward but click here if you want to look into it further.)Link #1 – So, the first of the three I went too actually thought had to do about cheese. It was talking about a cheese wheel. I thought hey! I like cheese! Sadly, it has very little to do with cheese, but documentation. I felt that this would be a good start for me, as it will get me some experience documenting and at least it will challenge my ability to understand something. If I can write it clean and concise to another user who knows it better then I do, I think I’ve done a good job. Fairly simple, and I think its a good base to work off of! - - Link #2 – This will most like before my first attempt at coding something for someone else without a rubric and just my own knowledge. Discord is a big platform right now for the gaming community, and I love my gaming communities. This is a relatively simple fix, but I think can help me understand the basic foundations of how discord modding works. This will in turn give me drive to look at this code and be able to improve my own servers on these things. - - Link #3 – Reading more into passport.js, and actually how these logins work, I felt this is something I knew quite a bit about, but how to implement it in a different manner, and in someone else code I felt was a good thing to build off something I already knew. This will be the third issue that I’m going to try and fix. All in all, this will be my first adventure, and hopefully not my last into the github world! If this is your first time, know I was there once as well, and as Ms. 
Frizzle says “Take chances, make mistakes, get messy!” -`; - expect(textParser(html)).toBe(parsed); - }); -}); diff --git a/test/url-parser.test.js b/test/url-parser.test.js deleted file mode 100644 index b687e047ae..0000000000 --- a/test/url-parser.test.js +++ /dev/null @@ -1,48 +0,0 @@ -const urlParser = require('../src/backend/utils/url-parser'); - -describe('url-parser tests', () => { - const testUrlWithPort = 'http://127.0.0.1:9200'; - const testUrlWithoutPort = 'http://127.0.0.1'; - const testport = '9200'; - const expectedReturn1 = 'http://127.0.0.1:9200/'; - const expectedReturn2 = 'http://127.0.0.1/'; - - test('urlParser with double ports', () => { - const result = urlParser(testUrlWithPort, testport); - expect(result).toBe(expectedReturn1); - }); - - test('urlParser with 1 string port', () => { - const result = urlParser(testUrlWithoutPort, testport); - expect(result).toBe(expectedReturn1); - }); - - test('urlParser with invalid url', () => { - const result = urlParser('', testport); - expect(result).toBe(null); - }); - - test('urlParser with url with no port and no port', () => { - const result = urlParser(testUrlWithoutPort); - expect(result).toBe(expectedReturn2); - }); - - test('urlPaser with port being a null value', () => { - const result = urlParser(testUrlWithoutPort, null); - expect(result).toBe(expectedReturn2); - }); - - test('urlPaser with port being an integer', () => { - const result = urlParser(testUrlWithoutPort, Number(testport)); - expect(result).toBe(expectedReturn1); - }); - - test('urlPaser with port being an invalid string', () => { - const result = urlParser(testUrlWithoutPort, 'invalid'); - expect(result).toBe(expectedReturn2); - }); - test('urlPaser with an out of range port', () => { - const result = urlParser(testUrlWithoutPort, 999999); - expect(result).toBe(expectedReturn2); - }); -}); diff --git a/test/user.test.js b/test/user.test.js deleted file mode 100644 index f5636ded82..0000000000 --- a/test/user.test.js +++ /dev/null @@ -1,115 +0,0 @@ -const request = require('supertest'); - -const app = require('../src/backend/web/app'); - -jest.mock('../src/backend/utils/indexer'); - -// Mock the internal authentication strategy -jest.mock('../src/backend/web/authentication'); -// Use our authentication test helper -const { login, loginAdmin, logout } = require('./lib/authentication'); - -describe('test GET /user/info endpoint', () => { - afterAll(() => logout()); - - it('should respond with a 403 status when not logged in', async () => { - logout(); - const res = await request(app).get(`/user/info`); - expect(res.status).toEqual(403); - }); - - it('should respond with a 200 status and JSON Object for logged in user', async () => { - const user = login(); - - const res = await request(app).get(`/user/info`); - expect(res.status).toEqual(200); - expect(res.get('Content-type')).toContain('application/json'); - expect(res.body instanceof Object).toBe(true); - - // Data should match our logged in user - expect(res.body.name).toEqual(user.name); - expect(res.body.email).toEqual(user.email); - expect(res.body.id).toEqual(user.id); - - // We are not an admin, so that should be false - expect(res.body.isAdmin).toEqual(user.isAdmin); - expect(res.body.isAdmin).toBe(false); - }); - - it('should respond with a 200 status and JSON Object for logged in admin', async () => { - const user = loginAdmin(); - - const res = await request(app).get(`/user/info`); - expect(res.status).toEqual(200); - expect(res.get('Content-type')).toContain('application/json'); - 
expect(res.body instanceof Object).toBe(true); - - // Data should match our logged in user - expect(res.body.name).toEqual(user.name); - expect(res.body.email).toEqual(user.email); - expect(res.body.id).toEqual(user.id); - - // We are an admin, so that should be true - expect(res.body.isAdmin).toEqual(user.isAdmin); - expect(res.body.isAdmin).toBe(true); - }); -}); - -describe('test GET /user/feeds endpoint', () => { - afterAll(() => logout()); - - it('should respond with a 403 status when not logged in', async () => { - logout(); - const res = await request(app).get(`/user/feeds`); - expect(res.status).toEqual(403); - }); - - it('should respond with a 200 status and JSON array if logged in', async () => { - login('Johannes Kepler', 'user1@example.com'); - const res = await request(app).get(`/user/feeds`); - expect(res.status).toEqual(200); - expect(res.get('Content-type')).toContain('application/json'); - expect(res.body instanceof Array).toBe(true); - }); - - it('should get all feeds when logged in as admin', async () => { - loginAdmin(); - - // Get feeds for admin user using /user/feeds - const res1 = await request(app).get(`/user/feeds`); - expect(res1.status).toEqual(200); - expect(res1.get('Content-type')).toContain('application/json'); - expect(res1.body instanceof Array).toBe(true); - - // Get all feeds using /feeds route - const res2 = await request(app).get(`/feeds`); - expect(res2.status).toEqual(200); - expect(res2.get('Content-type')).toContain('application/json'); - expect(res2.body instanceof Array).toBe(true); - - // Make sure the two sets of feed arrays match - expect(res2.body).toEqual(res1.body); - }); - - it('should respond with an updated array after a new user feed is added/removed', async () => { - const user = login('Johannes Kepler', 'user1@example.com'); - const feedCount = (await request(app).get(`/user/feeds`)).body.length; - const feedData = { - author: user.name, - url: 'http://telescope200.cdot.systems', - user: user.id, - }; - - const res = await request(app).post('/feeds').send(feedData).set('Accept', 'application/json'); - expect(res.status).toEqual(201); - - const incremented = (await request(app).get(`/user/feeds`)).body.length; - expect(incremented).toEqual(feedCount + 1); - - const del = await request(app).delete(`/feeds/${res.body.id}`); - expect(del.status).toEqual(204); - - const decremented = (await request(app).get(`/user/feeds`)).body.length; - expect(decremented).toEqual(incremented - 1); - }); -}); diff --git a/test/wiki-feed-parser.test.js b/test/wiki-feed-parser.test.js deleted file mode 100644 index 0e9291c2f8..0000000000 --- a/test/wiki-feed-parser.test.js +++ /dev/null @@ -1,63 +0,0 @@ -/* global fetch */ -global.fetch = require('node-fetch'); - -const getWikiFeeds = require('../src/backend/utils/wiki-feed-parser'); - -const mockFeed = `################# Failing Feeds Commented Out [Start] ################# - -#Feed excluded due to getaddrinfo ENOTFOUND s-aleinikov.blog.ca s-aleinikov.blog.ca:80 -#[http://s-aleinikov.blog.ca/feed/atom/posts/] -#name=Sergey Aleinikov - - -#Feed excluded due to getaddrinfo ENOTFOUND ejtorre.blog.ca ejtorre.blog.ca:80 -#[http://ejtorre.blog.ca/feed/rss2/posts/] -#name=Eugene Torre - - -#Feed excluded due to getaddrinfo ENOTFOUND rickeyre.ca rickeyre.ca:80 -#[http://rickeyre.ca/open-source-feed.xml] -#name=Rick Eyre - -[http://kopay.wordpress.com/category/sbr600-win2011/feed] -name=Pirathapan Sivalingam - - -[http://jessefulton.wordpress.com/category/SBR600/feed/] -name=Jesse Fulton - - 
-[http://eric-spo600.blogspot.com/feeds/posts/default] -name=Eric Ferguson - -#Feed excluded due to getaddrinfo ENOTFOUND rickeyre.ca rickeyre.ca:80 -#[http://rickeyre.ca/open-source-feed.xml] -#name=Rick Eyre - -[http://armenzg.blogspot.com/feeds/posts/default/-/open%20source] -name=Armen Zambrano G. (armenzg)`; - -beforeEach(() => { - fetch.resetMocks(); -}); - -test('Testing wiki-feed-parser.parseData', async () => { - const mockBody = `
${mockFeed}
`; - fetch.mockResponseOnce(mockBody); - - const expectedData = [ - { - author: 'Pirathapan Sivalingam', - url: 'http://kopay.wordpress.com/category/sbr600-win2011/feed', - }, - { author: 'Jesse Fulton', url: 'http://jessefulton.wordpress.com/category/SBR600/feed/' }, - { author: 'Eric Ferguson', url: 'http://eric-spo600.blogspot.com/feeds/posts/default' }, - { - author: 'Armen Zambrano G. (armenzg)', - url: 'http://armenzg.blogspot.com/feeds/posts/default/-/open%20source', - }, - ]; - - const response = await getWikiFeeds(); - expect(response).toStrictEqual(expectedData); -}); diff --git a/tools/autodeployment/env.example b/tools/autodeployment/env.example index ec0aa683cc..3d832d0aad 100644 --- a/tools/autodeployment/env.example +++ b/tools/autodeployment/env.example @@ -1,5 +1,4 @@ # Port on which the server will be listening. -# If you change this, update DEPLOY_PORT in src/backend/web/routes/admin.js as well DEPLOY_PORT=4000 # Secret to be shared between GitHub and the server diff --git a/tools/html-elements.js b/tools/html-elements.js index 5893ab2714..405975f7a0 100755 --- a/tools/html-elements.js +++ b/tools/html-elements.js @@ -7,7 +7,7 @@ const jsdom = require('jsdom'); -const Post = require('../src/backend/data/post'); +const Post = require('../src/api/parser/data/post'); const processPosts = require('./lib/process-posts'); const { JSDOM } = jsdom; diff --git a/tools/lib/process-posts.js b/tools/lib/process-posts.js index 160060076b..5174a33234 100644 --- a/tools/lib/process-posts.js +++ b/tools/lib/process-posts.js @@ -2,7 +2,7 @@ * Helper for iterating over posts in Redis. */ -const { getPosts } = require('../../src/backend/utils/storage'); +const { getPosts } = require('../../src/api/parser/src/utils/storage'); /** * Process all posts in the database, calling the processPosts function on each.