diff --git a/docker/astra/.gitlab-ci.yml b/docker/astra/.gitlab-ci.yml
new file mode 100644
index 0000000..ecffaeb
--- /dev/null
+++ b/docker/astra/.gitlab-ci.yml
@@ -0,0 +1,41 @@
+stages:
+  - build_image
+  - tests
+
+variables:
+  VERSION: "v1"
+
+build_image:
+  stage: build_image
+  tags:
+    - docker
+    - dind
+
+  script:
+    - docker login -u gitlab-ci-token -p $CI_BUILD_TOKEN $CI_REGISTRY
+    - docker build -t $CI_REGISTRY_IMAGE:$VERSION .
+    - docker tag $CI_REGISTRY_IMAGE:$VERSION $CI_REGISTRY_IMAGE:latest
+    - docker push $CI_REGISTRY_IMAGE:$VERSION
+    - docker push $CI_REGISTRY_IMAGE:latest
+    - docker rmi $CI_REGISTRY_IMAGE:$VERSION
+    - docker rmi $CI_REGISTRY_IMAGE:latest
+
+  only:
+    - master
+  when: manual
+  allow_failure: false
+
+
+tests_image:
+  stage: tests
+  image: $CI_REGISTRY_IMAGE:latest
+  needs: ["build_image"]
+  tags:
+    - docker
+    - dind
+
+  script:
+    - git --version
+
+  only:
+    - master
diff --git a/docker/astra/Dockerfile b/docker/astra/Dockerfile
new file mode 100644
index 0000000..78fbb88
--- /dev/null
+++ b/docker/astra/Dockerfile
@@ -0,0 +1,61 @@
+FROM sogimu/astralinux
+
+RUN echo "deb http://download.astralinux.ru/astra/stable/orel/repository/ orel main contrib non-free" > /etc/apt/sources.list && \
+    echo "deb-src http://download.astralinux.ru/astra/stable/orel/repository/ orel main contrib non-free" >> /etc/apt/sources.list && \
+    apt-get update -y && \
+    apt-get upgrade -y && \
+    apt-get install -y git curl wget sshpass && \
+    apt-get install -y libboost-all-dev libcurl4-nss-dev && \
+    apt-get install -y qtbase5-dev libjsoncpp-dev && \
+    apt-get install -y libxmlrpc-epi-dev libwrap0-dev python-dev && \
+    apt-get install -y libxmlrpc-core-c3-dev libncurses-dev libcap-dev && \
+    apt-get install -y libqrencode-dev libsystemd-dev libsqlite3-dev && \
+    apt-get install -y python3-lxml libftdi-dev libcanberra-gtk-module && \
+    apt-get install -y libpng-dev gtk2.0-dev libssl-dev libqt5serialport5-dev xorriso apt-transport-https ca-certificates && \
+    apt-get -f install -y && \
+    apt-get clean && \
+    rm -rf /var/lib/apt/lists/* && \
+    rm -f /var/cache/apt/archives/*.deb && \
+    rm -f /var/cache/apt/archives/partial/*.deb && \
+    rm -f /var/cache/apt/*.bin
+
+RUN echo "deb http://debian.proxad.net/debian/ stretch main contrib non-free" > /etc/apt/sources.list && \
+    apt-get update -y && \
+    apt-get install -y --allow-unauthenticated md5deep && \
+    apt-get clean && \
+    rm -rf /var/lib/apt/lists/* && \
+    rm -f /var/cache/apt/archives/*.deb && \
+    rm -f /var/cache/apt/archives/partial/*.deb && \
+    rm -f /var/cache/apt/*.bin
+
+RUN echo "deb http://download.astralinux.ru/astra/stable/orel/repository/ orel main contrib non-free" > /etc/apt/sources.list && \
+    echo "deb-src http://download.astralinux.ru/astra/stable/orel/repository/ orel main contrib non-free" >> /etc/apt/sources.list
+
+# Download and install custom packages that are not available in the default apt repositories
+RUN wget --user=EXAMPLEUSER --password=EXAMPLEPASSWORD -q -N http://example-url/some.deb && \
+    dpkg -i --skip-same-version *.deb && \
+    rm libarcus2-dev_2.2.0.0.42_amd64.deb && \
+    rm libarcus2_2.2.0.0.42_amd64.deb && \
+    rm libfptr10-dev_10.9.2.0_amd64.deb && \
+    rm libfptr10_10.9.2.0_amd64.deb && \
+    rm zint-dev_2.6.7_amd64.deb && \
+    rm zint_2.6.7_amd64.deb && \
+    apt-get clean && \
+    rm -rf /var/lib/apt/lists/* && \
+    rm -f /var/cache/apt/archives/*.deb && \
+    rm -f /var/cache/apt/archives/partial/*.deb && \
+    rm -f /var/cache/apt/*.bin
+
+RUN apt-get update -y && \
+    apt-get install debian-keyring -y && \
+    apt-get install build-essential -y && \
+    apt-get clean && \
+    rm -rf /var/lib/apt/lists/* && \
+    rm -f /var/cache/apt/archives/*.deb && \
+    rm -f /var/cache/apt/archives/partial/*.deb && \
+    rm -f /var/cache/apt/*.bin
+
+# Download and install cmake 3.15
+RUN wget https://github.com/Kitware/CMake/releases/download/v3.15.5/cmake-3.15.5-Linux-x86_64.sh && \
+    yes Y | bash cmake-3.15.5-Linux-x86_64.sh && \
+    echo "PATH=$PATH:/cmake-3.15.5-Linux-x86_64/bin" >> /etc/profile
diff --git a/docker/ubuntu-node/.gitlab-ci.yml b/docker/ubuntu-node/.gitlab-ci.yml
new file mode 100644
index 0000000..f3837be
--- /dev/null
+++ b/docker/ubuntu-node/.gitlab-ci.yml
@@ -0,0 +1,94 @@
+stages:
+  - build
+  - test
+  - integration
+  - public
+
+variables:
+  VERSION: 1.1.0
+  VERSION_NUMBER: "${VERSION}.${CI_PIPELINE_IID}"
+  STORAGE_FOLDER: "frontend"
+
+before_script:
+  - |
+    if [ -f "package.json" ]; then
+      export VERSION=$(jq -r ".version // \"${VERSION}\"" package.json)
+      export STORAGE_FOLDER=$(jq -r ".storage // \"${STORAGE_FOLDER}\"" package.json)
+    fi
+
+  - export VERSION=$VERSION.$CI_PIPELINE_IID
+  - export VERSION_NUMBER=$VERSION
+  - yarn install
+
+build:
+  stage: build
+  image: url-to-ubuntu-image:latest
+  tags:
+    - docker
+    - dind
+  script:
+    - CI=false yarn run build
+    - pwd
+    - mkdir package
+    - mkdir -p cmake-build
+    - cd cmake-build
+    - cmake ../pkg
+    - make package
+    - mv $STORAGE_FOLDER-$VERSION_NUMBER-Linux.deb ../package/
+
+  artifacts:
+    paths:
+      - package
+    expire_in: 1 day
+
+test:
+  stage: test
+  image: url-to-ubuntu-image:latest
+  tags:
+    - docker
+    - dind
+  script:
+    - CI=false yarn test
+
+integration:
+  stage: integration
+  allow_failure: true
+  needs:
+    - job: build
+      artifacts: true
+  image: url-to-ubuntu-image:latest
+  tags:
+    - docker
+    - dind
+  script:
+    - echo "Whoa! Master merge!!! Testing App integration! Wrooom wroom C:"
+    - wget https://github.com/mozilla/geckodriver/releases/download/v0.30.0/geckodriver-v0.30.0-linux64.tar.gz
+    - tar -xf geckodriver-v0.30.0-linux64.tar.gz
+    - mv geckodriver /usr/local/bin/
+    - cd ./integration
+    - npm install
+    - npm install --save-dev jest
+    - npm install selenium-webdriver
+    - npm test tests.test.js
+    - cd ~
+
+public_deb:
+  stage: public
+  needs:
+    - job: build
+      artifacts: true
+  image: url-to-ubuntu-image:latest
+  dependencies:
+    - build
+  tags:
+    - docker
+    - dind
+  script:
+    - echo $VERSION_NUMBER
+    - echo $CI_PIPELINE_IID
+    - ls -la package
+    - sshpass -p $EXP_STORE2_ROOT_PASSWORD ssh -o StrictHostKeyChecking=no $EXP_STORE2_ROOT_USER@storage.url "mkdir -p /home/storage/$STORAGE_FOLDER/$CI_COMMIT_REF_NAME/ASTRA/$VERSION_NUMBER"
+    - sshpass -p $EXP_STORE2_ROOT_PASSWORD scp package/$STORAGE_FOLDER-$VERSION_NUMBER-Linux.deb $EXP_STORE2_ROOT_USER@storage.url:/home/storage/$STORAGE_FOLDER/$CI_COMMIT_REF_NAME/ASTRA/$VERSION_NUMBER/$STORAGE_FOLDER-$VERSION_NUMBER-Linux.deb
+  only:
+    - master
+    - develop
diff --git a/docker/ubuntu-node/Dockerfile b/docker/ubuntu-node/Dockerfile
new file mode 100644
index 0000000..dc185c1
--- /dev/null
+++ b/docker/ubuntu-node/Dockerfile
@@ -0,0 +1,29 @@
+FROM ubuntu:18.04
+
+ENV DOCKER_VERSION=19.03.13
+ENV NODE_VERSION=17
+
+RUN apt-get update && \
+    apt-get -y install git wget bash curl sshpass jq zip unzip build-essential rpm cmake && \
+    git --version && \
+    apt-get clean && \
+    rm -rf /var/lib/apt/lists/* && \
+    rm -f /var/cache/apt/archives/*.deb && \
+    rm -f /var/cache/apt/archives/partial/*.deb && \
+    rm -f /var/cache/apt/*.bin
+
+RUN curl -sL https://deb.nodesource.com/setup_${NODE_VERSION}.x | bash && \
+    apt-get install -y nodejs && \
+    node -v && \
+    npm i yarn -g && \
+    apt-get clean && \
+    rm -rf /var/lib/apt/lists/* && \
+    rm -f /var/cache/apt/archives/*.deb && \
+    rm -f /var/cache/apt/archives/partial/*.deb && \
+    rm -f /var/cache/apt/*.bin
+
+RUN wget https://download.docker.com/linux/static/edge/x86_64/docker-${DOCKER_VERSION}.tgz && \
+    tar xvfz docker-${DOCKER_VERSION}.tgz && \
+    rm docker-${DOCKER_VERSION}.tgz && \
+    cp ./docker/docker /usr/bin/ && \
+    docker -v
diff --git a/shell/.update-now-playing.sh b/shell/.update-now-playing.sh
new file mode 100644
index 0000000..6af9afd
--- /dev/null
+++ b/shell/.update-now-playing.sh
@@ -0,0 +1,7 @@
+#!/bin/sh
+
+while :
+do
+    echo "$(mpc | head -n 1) " > /home/naiji/.now-playing
+    sleep 1
+done
\ No newline at end of file
diff --git a/shell/aptupdater.sh b/shell/aptupdater.sh
new file mode 100644
index 0000000..a2efea1
--- /dev/null
+++ b/shell/aptupdater.sh
@@ -0,0 +1,179 @@
+#!/bin/bash
+
+tryUpdateFromSd()
+{
+    local sd_name=$1
+    # Cut the received line by '/' and take the third field (for /dev/sdb1 that's sdb1)
+    local sd_folder_name=$(echo "${sd_name}" | cut -f 3 -d '/')
+    exit_condition=false
+    while [ "$exit_condition" == false ]
+    do
+        echo "Do you want to update from ${sd_name}? (Y/n)"
+        read input_char
+        if [ "$input_char" == "Y" ] || [ "$input_char" == "y" ]
+        then
+            echo " - Create /media/iso-$sd_folder_name"
+            mkdir /media/iso-$sd_folder_name
+            echo " - Mount ${sd_name} in /media/iso-$sd_folder_name"
+            mount $sd_name /media/iso-$sd_folder_name
+            echo " - Update the package manager."
+            cp /etc/apt/sources.list /etc/apt/sources-backup.list
+            mkdir /home/temp-apt
+            mv /etc/apt/sources.list.d/* /home/temp-apt/
+            echo "deb [trusted=yes] file:/media/iso-$sd_folder_name orel main contrib" > /etc/apt/sources.list
+            echo " - Update packages."
+            apt update
+            apt upgrade
+            echo " - Unmount ${sd_name}"
+            umount /media/iso-$sd_folder_name
+            rm -rf /media/iso-$sd_folder_name
+            echo " - Restore the package manager."
+            mv /etc/apt/sources-backup.list /etc/apt/sources.list
+            mv /home/temp-apt/* /etc/apt/sources.list.d/
+            rm -rf /home/temp-apt/
+            exit_condition=true
+            exit 0
+        elif [ "$input_char" == "N" ] || [ "$input_char" == "n" ]
+        then
+            echo "Skip ${sd_name}"
+            exit_condition=true
+        else
+            echo "Please, type Y to agree or N to decline."
+            echo "Input Ctrl+C to exit."
+        fi
+    done
+}
+
+tryUpdateFromDrive()
+{
+    # Show devices with fdisk -l,
+    # filter all lines which contain "sd*" (where * is any latin letter from b to z) except for "sd*:",
+    # cut by the space character and take the first field,
+    # so the result is, for example: /dev/sdb1
+    sd_arr_raw=$(fdisk -l | egrep "sd[b-zB-Z][^:]" | cut -f 1 -d ' ')
+    amount_of_sd=0
+    for sd_raw in $sd_arr_raw
+    do
+        amount_of_sd=$(($amount_of_sd+1))
+        echo "${sd_raw}; ID=${amount_of_sd}"
+    done
+    if (( $amount_of_sd < 1 ))
+    then
+        echo "Couldn't find external drives. Please, make sure they are connected and available."
+        exit 1
+    else
+        for sd_raw in $sd_arr_raw
+        do
+            tryUpdateFromSd "${sd_raw}"
+        done
+        exit 0
+    fi
+}
+
+tryUpdateFromFile()
+{
+    exit_condition=false
+    while [ "$exit_condition" == false ]
+    do
+        echo "Do you want to update from ${filename}? (Y/n)"
+        read input_char
+        if [ "$input_char" == "y" ] || [ "$input_char" == "Y" ]
+        then
+            echo " - Create /media/iso-$filename"
+            mkdir /media/iso-$filename
+            echo " - Mount ${filename} in /media/iso-$filename"
+            mount $filename /media/iso-$filename
+            echo " - Update the package manager."
+            cp /etc/apt/sources.list /etc/apt/sources-backup.list
+            mkdir /home/temp-apt
+            mv /etc/apt/sources.list.d/* /home/temp-apt/
+            echo "deb [trusted=yes] file:/media/iso-$filename orel main contrib" > /etc/apt/sources.list
+            echo " - Update packages."
+            apt update
+            apt upgrade
+            echo " - Unmount ${filename}"
+            umount /media/iso-$filename
+            rm -rf /media/iso-$filename
+            echo " - Restore the package manager."
+            mv /etc/apt/sources-backup.list /etc/apt/sources.list
+            mv /home/temp-apt/* /etc/apt/sources.list.d/
+            rm -rf /home/temp-apt/
+            exit_condition=true
+            updated=true
+            exit 0
+        elif [ "$input_char" == "n" ] || [ "$input_char" == "N" ]
+        then
+            exit_condition=true
+            exit 0
+        else
+            echo "Please, type Y to agree or N to decline."
+            echo "Input Ctrl+C to exit."
+        fi
+    done
+}
+
+printHelp()
+{
+    echo "iso-updater - a tool for updating OS packages via apt."
+    echo " "
+    echo "USAGE: iso-updater [-OPTIONS...] [ARGS]"
+    echo " "
+    echo "OPTIONS:"
+    echo "-h, --help           Show brief instructions."
+    echo " "
+    echo "-d, --drive          Update the system from an external drive."
+    echo "                     Make sure there is one and only one external drive"
+    echo "                     connected. The program will read its partitions and"
+    echo "                     try to upgrade the system from each of them."
+    echo "                     Thoroughly follow all the instructions after launch."
+    echo " "
+    echo "-f, --file [PATH]    Update the system from an .iso file."
+    echo "                     Make sure the .iso file you want to update"
+    echo "                     the system from exists and is located"
+    echo "                     exactly at PATH. Thoroughly follow all"
+    echo "                     the instructions after launch."
+    echo " "
+    echo "Example: sudo iso-updater --file /home/user/iso/very-very-cool-iso-file.iso"
+    echo " "
+    echo " "
+    exit 0
+}
+
+while test $# -gt 0; do
+    case "$1" in
+        -h|--help)
+            printHelp
+            ;;
+        -d|--drive)
+            if [[ $UID != 0 ]]; then
+                echo "Please, run with sudo:"
+                echo "sudo $0 $*"
+                exit 1
+            fi
+            tryUpdateFromDrive
+            exit 0
+            ;;
+        -f|--file)
+            if [[ $UID != 0 ]]; then
+                echo "Please, run with sudo:"
+                echo "sudo $0 $*"
+                exit 1
+            fi
+            shift
+            filename=$1
+            if [ ! -f "$filename" ]
+            then
+                echo "Couldn't find $filename. Please use a correct filename and make sure it exists."
+                exit 1
+            fi
+            tryUpdateFromFile
+            exit 0
+            ;;
+    esac
+    echo "Couldn't recognize $1."
+    echo " "
+    shift
done
+
+printHelp
+exit 0
diff --git a/shell/buildingcmakes.sh b/shell/buildingcmakes.sh
new file mode 100644
index 0000000..2eb29f6
--- /dev/null
+++ b/shell/buildingcmakes.sh
@@ -0,0 +1,267 @@
+#!/bin/bash
+
+MAIN_FOLDER="$(readlink -f .)"
+if [ -z "${P_BUILD}" ]; then echo "Please set P_BUILD variable."; exit 1; fi;
+if [ -z "${P_OUT}" ]; then echo "Please set P_OUT variable."; exit 1; fi;
+
+printStep()
+{
+    local comment=$1
+    echo " "
+    echo " !! !! ${comment} !! !! "
+}
+
+buildPackage()
+{
+    cd ${MAIN_FOLDER}
+
+    echo "cp -rf packages/$1/include/$1 ./include/"
+    cp -rf packages/$1/include/$1 ./include/ 2>/dev/null
+
+    local headers=`ls include/*.h`
+    for header in ${headers}
+    do
+        if [ -d packages/$1/include/ ]
+        then
+            cp -fr ${header} packages/$1/include/ 2>/dev/null
+        fi
+    done
+
+    printStep "MAKE $1"
+
+    echo "mkdir ./packages/$1/build"
+    mkdir -p ./packages/$1/build
+
+    echo "cd ./packages/$1/build"
+    cd ./packages/$1/build
+
+    cmake ..
+
+    make -j$(nproc)
+    local libs=`ls *.so`
+    for lib in ${libs}
+    do
+        echo "cp ${lib} ${P_OUT}"
+        cp ${lib} ${P_OUT}
+    done
+
+    cd ${MAIN_FOLDER}
+}
+
+clean()
+{
+    local packages=`ls packages/`
+    for package in ${packages}
+    do
+        rm -rf packages/${package}/build
+    done
+    rm -rf ${P_BUILD}
+}
+
+taskqueue()
+{
+    echo "cp -rf packages/tools/include/tools packages/taskqueue/include/"
+    cp -rf packages/tools/include/tools packages/taskqueue/include/
+    buildPackage "taskqueue"
+}
+
+types()
+{
+    echo "cp -rf packages/tools/include/tools packages/types/include/"
+    cp -rf packages/tools/include/tools packages/types/include/
+    buildPackage "types"
+}
+
+logger()
+{
+    echo "cp -rf packages/types/include/types packages/logger/include/"
+    cp -rf packages/types/include/types packages/logger/include/
+    buildPackage "logger"
+}
+
+settings()
+{
+    echo "cp -rf packages/inithelper/include/inithelper packages/settings/include/"
+    cp -rf packages/inithelper/include/inithelper packages/settings/include/
+    echo "cp -rf packages/logger/include/logger packages/settings/include/"
+    cp -rf packages/logger/include/logger packages/settings/include/
+    buildPackage "settings"
+}
+
+cash()
+{
+    echo "cp -rf packages/settings/include/settings packages/cash/include/"
+    cp -rf packages/settings/include/settings packages/cash/include/
+    echo "cp -rf packages/inithelper/include/inithelper packages/cash/include/"
+    cp -rf packages/inithelper/include/inithelper packages/cash/include/
+    echo "cp -rf packages/logger/include/logger packages/cash/include/"
+    cp -rf packages/logger/include/logger packages/cash/include/
+    echo "cp -rf packages/tools/include/tools packages/cash/include/"
+    cp -rf packages/tools/include/tools packages/cash/include/
+    buildPackage "cash"
+}
+
+legacy()
+{
+    mkdir -p packages/legacy/lib
+
+    echo "Resolve .so legacy dependencies"
+    cp ${P_OUT}/libsettings.so ./packages/legacy/lib/
+    cp ${P_OUT}/liblogger.so ./packages/legacy/lib/
+    cp ${P_OUT}/libinithelper.so ./packages/legacy/lib/
+    cp ${P_OUT}/libtools.so ./packages/legacy/lib/
+    cp ${P_OUT}/libtypes.so ./packages/legacy/lib/
+
+    echo "cp -rf packages/inithelper/include/inithelper/ packages/legacy/legacy/include/"
+    cp -rf packages/inithelper/include/inithelper/ packages/legacy/legacy/include/
+    echo "cp -rf packages/settings/include/settings/ packages/legacy/legacy/include/"
+    cp -rf packages/settings/include/settings/ packages/legacy/legacy/include/
+
+    cp -rf packages/legacy/legacy/include ./include/legacy 2>/dev/null
+
+    buildPackage "legacy"
+
+    mv -f packages/legacy/build/selector ${P_OUT}
+}
+
+mfc()
+{
+    echo "cp -rf packages/logger/include/logger packages/mfc/include/"
+    cp -rf packages/logger/include/logger packages/mfc/include/
+    buildPackage "mfc"
+}
+
+td()
+{
+    echo "cp -rf packages/tools/include/tools packages/td/include/"
+    cp -rf packages/tools/include/tools packages/td/include/
+    echo "cp -rf packages/taskqueue/include/taskqueue packages/td/include/"
+    cp -rf packages/taskqueue/include/taskqueue packages/td/include/
+    buildPackage "td"
+}
+
+wdm()
+{
+    mkdir -p packages/wdm/lib
+
+    echo "Resolve .so wdm dependencies"
+    cp ${P_OUT}/libsettings.so packages/wdm/lib
+    cp ${P_OUT}/liblogger.so packages/wdm/lib
+    cp ${P_OUT}/libinithelper.so packages/wdm/lib
+    cp ${P_OUT}/libtools.so packages/wdm/lib
+    cp ${P_OUT}/libtypes.so packages/wdm/lib
+    cp ${P_OUT}/libintface.so packages/wdm/lib
+
+    echo "cp -rf packages/settings/include/settings packages/wdm/include/"
+    cp -rf packages/settings/include/settings packages/wdm/include/
+    echo "cp -rf packages/logger/include/logger packages/wdm/include/"
+    cp -rf packages/logger/include/logger packages/wdm/include/
+    echo "cp -rf packages/inithelper/include/inithelper/ packages/wdm/include/"
+    cp -rf packages/inithelper/include/inithelper/ packages/wdm/include/
+    echo "cp -rf packages/settings/include/settings/ packages/wdm/include/"
+    cp -rf packages/settings/include/settings/ packages/wdm/include/
+    echo "cp -rf packages/legacy/legacy/include/ packages/wdm/include/legacy"
+    cp -rf packages/legacy/legacy/include/ packages/wdm/include/legacy
+    echo "cp -rf packages/round/include/round packages/wdm/include/"
+    cp -rf packages/round/include/round packages/wdm/include/
+    buildPackage "wdm"
+}
+
+buildLibraries()
+{
+    buildPackage "round"
+    buildPackage "inithelper"
+    buildPackage "tools"
+    taskqueue
+    types
+    logger
+    settings
+    cash
+    legacy
+    mfc
+    td
+    wdm
+}
+
+core()
+{
+    mkdir -p ${P_BUILD}
+
+    cmake -B ${P_BUILD} -DP_OUT=${P_OUT}
+
+    cd ${P_BUILD}
+    make -j$(nproc)
+}
+
+buildAll()
+{
+    buildLibraries
+    cd ${MAIN_FOLDER}
+
+    echo " "
+    echo " "
+    printStep "CORE"
+    echo " "
+    echo " "
+
+    core $1
+}
+
+main()
+{
+    # Expecting either 'virt' or 'real' in $2 for core
+    # 'virt' proceeds to build the virtual version with emulators
+    # 'real' proceeds to build the production version with real devices
+    #
+    # !! not implemented here !!
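+
+    # Usage sketch (assumed invocation, based on the flags handled below;
+    # P_BUILD/P_OUT are required environment variables, not script arguments):
+    #   P_BUILD=./build P_OUT=./out ./buildingcmakes.sh            # build everything
+    #   P_BUILD=./build P_OUT=./out ./buildingcmakes.sh libs       # libraries only
+    #   P_BUILD=./build P_OUT=./out ./buildingcmakes.sh core virt  # core only, 'virt'/'real' mode
+    #   P_BUILD=./build P_OUT=./out ./buildingcmakes.sh clean      # remove build artifacts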
+
+    mkdir -p ${P_BUILD}
+    mkdir -p ${P_OUT}
+
+    flag=$1
+    if [ -z ${flag} ]
+    then
+        buildAll $2
+    elif [ "$flag" == "libs" ]
+    then
+        buildLibraries
+    elif [ "$flag" == "core" ]
+    then
+        core $2
+    elif [ "$flag" == "clean" ]
+    then
+        clean
+    elif [ "$flag" == "taskqueue" ]
+    then
+        taskqueue
+    elif [ "$flag" == "types" ]
+    then
+        types
+    elif [ "$flag" == "logger" ]
+    then
+        logger
+    elif [ "$flag" == "settings" ]
+    then
+        settings
+    elif [ "$flag" == "cash" ]
+    then
+        cash
+    elif [ "$flag" == "legacy" ]
+    then
+        legacy
+    elif [ "$flag" == "mfc" ]
+    then
+        mfc
+    elif [ "$flag" == "td" ]
+    then
+        td
+    elif [ "$flag" == "wdm" ]
+    then
+        wdm
+    elif [ -d "./packages/${flag}" ]
+    then
+        buildPackage $flag
+    fi
+}
+
+main $1 $2
diff --git a/shell/collectionfor.sh b/shell/collectionfor.sh
new file mode 100644
index 0000000..db538e8
--- /dev/null
+++ b/shell/collectionfor.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+
+# READ ALL WEBP FILES FROM A FOLDER
+# AND DO SOMETHING TO EACH OF THEM
+
+files=$(ls | grep '\.webp$')
+
+for file in $files
+do
+    name=$(echo $file | cut -f 1 -d '.')
+    echo "$name, /emoji/random/$name.webp"
+done
\ No newline at end of file
diff --git a/shell/createswap.sh b/shell/createswap.sh
new file mode 100644
index 0000000..4b98a3b
--- /dev/null
+++ b/shell/createswap.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+
+echo " -- Create /swapfile"
+sudo dd if=/dev/zero of=/swapfile bs=1G count=20
+sudo chmod 600 /swapfile
+sudo mkswap /swapfile
+sudo swapon /swapfile
+echo " -- Activated"
diff --git a/shell/dbsshutil.sh b/shell/dbsshutil.sh
new file mode 100644
index 0000000..d38aa32
--- /dev/null
+++ b/shell/dbsshutil.sh
@@ -0,0 +1,143 @@
+#!/bin/bash
+
+TARGET_REMOTE_USER=adm0
+TARGET_OWNER_USER=owner
+TARGET_REMOTE_PATH=/usr/disp/
+export PATH
+
+if [ "$#" -ne "1" ] && [ "$#" -ne "2" ] && [ "$#" -ne "3" ]
+then
+    echo "$0 --help "
+    exit 0
+fi
+
+if [ "$1" == "--help" ]
+then
+    echo "sshdb - a utility for simpler database management between local machines via authorized_keys and rsync."
+    echo " "
+    echo "USAGE: sshdb [-FLAG] [ARGS..]"
+    echo " "
+    echo "FLAGS:"
+    echo "--help           Show this help."
+    echo " "
+    echo "-g [IP] [|PATH]  Copy the database from the remote machine to PATH/"
+    echo "                 or ./ if the PATH argument is missing."
+    echo "                 Example: sshdb -g 10.64.184.60"
+    echo "                          sshdb -g 10.64.184.60 ./tempdb/"
+    echo " "
+    echo "-l [IP] [|FILE]  Copy the database from FILE to the remote machine"
+    echo "                 and execute initial maintenance; FILE defaults to"
+    echo "                 ./database.sqlite if the argument is missing."
+    echo "                 Example: sshdb -l 10.64.184.60"
+    echo "                          sshdb -l 10.64.184.60 /home/myuser/folder/folder2/database.sqlite"
+    echo " "
+    echo "-c [IP1] [IP2]   Copy the database from IP1 to IP2 and execute initial maintenance."
+    echo "                 Example: sshdb -c 172.23.16.176 10.64.184.60"
+    echo " "
+    exit 0
+fi
+
+# -----------------------------------
+
+if [ "$1" == "-g" ]
+then
+    if [ -z "$(cat ~/.ssh/id_rsa.pub)" ]
+    then
+        echo "SSH key is needed. Generate one with ssh-keygen."
+        exit 1
+    fi
+
+    if [ -z "$2" ]
+    then
+        echo "Target IP is needed. Run $0 --help."
+        exit 1
+    fi
+
+    LOCAL_DATABASE_PATH="./"
+
+    if [ ! -z "$3" ]
+    then
+        LOCAL_DATABASE_PATH="$3"
+    fi
+
+    if [ ! -d $LOCAL_DATABASE_PATH ]
+    then
+        echo "Path $LOCAL_DATABASE_PATH is missing."
+        exit 1
+    fi
+
+    ssh-copy-id "$TARGET_REMOTE_USER"@"$2"
+    ssh "$TARGET_REMOTE_USER"@"$2" "sudo apt install rsync"
+    rsync "$TARGET_REMOTE_USER"@"$2":${TARGET_REMOTE_PATH}database.sqlite $LOCAL_DATABASE_PATH
+    exit 0
+fi
+
+# -----------------------------------
+
+if [ "$1" == "-l" ]
+then
+    if [ -z "$(cat ~/.ssh/id_rsa.pub)" ]
+    then
+        echo "SSH key is needed. Generate one with ssh-keygen."
+        exit 1
+    fi
+
+    if [ -z "$2" ]
+    then
+        echo "Target IP is needed. Run $0 --help."
+        exit 1
+    fi
+
+    LOCAL_DATABASE_FILE="./database.sqlite"
+
+    if [ ! -z "$3" ]
+    then
+        LOCAL_DATABASE_FILE="$3"
+    fi
+
+    if [ ! -f $LOCAL_DATABASE_FILE ]
+    then
+        echo "File $LOCAL_DATABASE_FILE is missing."
+        exit 1
+    fi
+
+    ssh-copy-id "$TARGET_REMOTE_USER"@"$2"
+    ssh "$TARGET_REMOTE_USER"@"$2" "sudo apt install rsync"
+    rsync $LOCAL_DATABASE_FILE "$TARGET_REMOTE_USER"@"$2":/home/"$TARGET_REMOTE_USER"/
+    ssh "$TARGET_REMOTE_USER"@"$2" "sudo mv -f ./database.sqlite ${TARGET_REMOTE_PATH}database.sqlite && sudo chown ${TARGET_OWNER_USER}:${TARGET_OWNER_USER} ${TARGET_REMOTE_PATH}database.sqlite"
+    exit 0
+fi
+
+# -----------------------------------
+
+if [ "$1" == "-c" ]
+then
+    if [ -z "$(cat ~/.ssh/id_rsa.pub)" ]
+    then
+        echo "SSH key is needed. Generate one with ssh-keygen."
+        exit 1
+    fi
+
+    if [ -z "$2" ]
+    then
+        echo "Target IP of the first machine is needed. Run $0 --help."
+        exit 1
+    fi
+
+    if [ -z "$3" ]
+    then
+        echo "Target IP of the second machine is needed. Run $0 --help."
+        exit 1
+    fi
+
+    ssh-copy-id "$TARGET_REMOTE_USER"@"$2"
+    ssh "$TARGET_REMOTE_USER"@"$2" "sudo apt install rsync"
+    rsync "$TARGET_REMOTE_USER"@"$2":${TARGET_REMOTE_PATH}database.sqlite ./.temp.sqlite
+
+    ssh-copy-id "$TARGET_REMOTE_USER"@"$3"
+    ssh "$TARGET_REMOTE_USER"@"$3" "sudo apt install rsync"
+    rsync ./.temp.sqlite "$TARGET_REMOTE_USER"@"$3":/home/"$TARGET_REMOTE_USER"/
+    ssh "$TARGET_REMOTE_USER"@"$3" "sudo mv -f ./database.sqlite ${TARGET_REMOTE_PATH}database.sqlite && sudo chown ${TARGET_OWNER_USER}:${TARGET_OWNER_USER} ${TARGET_REMOTE_PATH}database.sqlite"
+    rm -f ./.temp.sqlite
+    exit 0
+fi
diff --git a/shell/dronefailures b/shell/dronefailures
new file mode 100644
index 0000000..ab85ece
--- /dev/null
+++ b/shell/dronefailures
@@ -0,0 +1,28 @@
+#!/bin/bash
+
+result=$(curl -X GET https://DOMAIN/api/repos/OWNER/REPO/builds -H "Authorization: Bearer YOURTOKEN")
+
+if [ -z "$1" ]
+then
+    max_counter=3
+else
+    max_counter=$1
+fi
+
+counter=0
+echo "${result}" | jq -r '.[] | "\(.status)"' | while read status
+do
+    if [ "$status" == "failure" ]
+    then
+        echo " "
+        echo " [ build failed! ]"
+        break
+    fi
+
+    if [ $counter -eq $max_counter ]
+    then
+        break
+    fi
+
+    counter=$(($counter + 1))
+done
\ No newline at end of file
diff --git a/shell/indexparser/crontab.conf b/shell/indexparser/crontab.conf
new file mode 100644
index 0000000..2166396
--- /dev/null
+++ b/shell/indexparser/crontab.conf
@@ -0,0 +1 @@
+1 0 * * * PATH='/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin' bash /home/user/indexparser.sh > /home/user/"Update at $(date).log"
diff --git a/shell/indexparser/indexparser.sh b/shell/indexparser/indexparser.sh
new file mode 100644
index 0000000..ca3c5fe
--- /dev/null
+++ b/shell/indexparser/indexparser.sh
@@ -0,0 +1,34 @@
+#!/bin/bash
+
+###############################################
+# Upgrade to mainline #########################
+
+yes Y | apt update
+yes Y | apt upgrade
+
+
+
+###############################################
+# Parse index.html with latest dev versions ###
+
+# Here we cut like:
+# 0.1.0.1653/
+# to
+# 0.1.0.1653
+
+wget http://storage-url/package-name/dev/
+recent_develop="$(cat index.html | tail -n 3 | head -n 1 | cut -f 2 -d '>' | cut -f 1 -d '<' | cut -f 1 -d '/')"
+echo "Latest dev version: $recent_develop"
+rm *index*
+
+
+
+###############################################
+# Download the latest version #################
+
+wget http://storage-url/package-name/dev/${recent_develop}/package-name-${recent_develop}-Linux.deb
+yes Y | dpkg -i *.deb
+rm *.deb
+
+
+systemctl restart package-name
\ No newline at end of file
diff --git a/shell/lsblkmounter.sh b/shell/lsblkmounter.sh
new file mode 100644
index 0000000..a8516a8
--- /dev/null
+++ b/shell/lsblkmounter.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+
+lsblk
+echo ""
+if [ ! -z "$1" ]
+then
+    mkdir -p ~/Mounts
+    mkdir -p ~/Mounts/$1
+    sudo mount /dev/$1 ~/Mounts/$1
+else
+    echo ""
+    echo "Input 'NAME' to mount in ~/Mounts/'NAME'"
+fi
diff --git a/shell/random-line.sh b/shell/random-line.sh
new file mode 100644
index 0000000..a0cfd4a
--- /dev/null
+++ b/shell/random-line.sh
@@ -0,0 +1,6 @@
+#!/bin/bash
+
+# Output a random even-numbered line together with the line just before it
+number=$((1 + RANDOM % $(expr $(cat filename | wc -l) / 2)))
+finalline=$(($number * 2))
+head -n $finalline filename | tail -n 2
diff --git a/shell/rawjsonvaluereader.sh b/shell/rawjsonvaluereader.sh
new file mode 100644
index 0000000..ba52f55
--- /dev/null
+++ b/shell/rawjsonvaluereader.sh
@@ -0,0 +1,52 @@
+# Manually extracting a value from the settings JSON file
+
+FOUND_CURSOR=0
+JSON_PATH=/var/pos/config/UiSettings.cfg
+
+echo " " > /tmp/checkIcons.log
+
+# Are we launching it for the first time (assuming the cursor is currently visible)
+if [[ ! -d /usr/share/icons/default-backup ]]
+then
+    mv /usr/share/icons/default /usr/share/icons/default-backup
+    cp -r /usr/share/icons/xcursor-transparent /usr/share/icons/default
+fi
+
+if [[ ! -f ${JSON_PATH} ]]
+then
+    echo "${JSON_PATH} is missing" >> /tmp/checkIcons.log
+    exit 0
+fi
+
+while IFS= read -r line
+do
+    if [ ! -z "$(echo \"${line}\" | grep \"isCursorVisible\")" ]
+    then
+        echo "Found JSON key about cursor visibility" >> /tmp/checkIcons.log
+        FOUND_CURSOR=1
+    fi
+
+    if [ "$FOUND_CURSOR" = "1" ] && [ ! -z "$(echo \"$line\" | grep \"value\")" ]
+    then
+        if [ ! -z "$(echo \"${line}\" | grep 0)" ]
+        then
+            echo "Disable cursor visibility." >> /tmp/checkIcons.log
+            rm -rf /usr/share/icons/default
+            cp -r /usr/share/icons/xcursor-transparent /usr/share/icons/default
+        fi
+
+        if [ ! -z "$(echo \"${line}\" | grep 1)" ]
+        then
+            echo "Enable cursor visibility." >> /tmp/checkIcons.log
+            rm -rf /usr/share/icons/default
+            cp -r /usr/share/icons/default-backup /usr/share/icons/default
+        fi
+
+    fi
+
+    if [ ! -z "$(echo \"${line}\" | grep })" ]
+    then
+        FOUND_CURSOR=0
+    fi
+
+done < ${JSON_PATH}
diff --git a/shell/sqlite3.sh b/shell/sqlite3.sh
new file mode 100644
index 0000000..547e6d2
--- /dev/null
+++ b/shell/sqlite3.sh
@@ -0,0 +1,75 @@
+#!/bin/bash
+
+database_path=" "
+exit_condition=false
+
+printLastCassette()
+{
+    last_id=$(sqlite3 "${database_path}" "SELECT MAX(id) FROM cassete;")
+
+    if [ -z ${last_id} ]
+    then
+        echo "Couldn't find cassette entries in \"${database_path}\"."
+        exit 0
+    fi
+
+    printCassetteById $last_id
+}
+
+printCassetteById()
+{
+    local cassette_id=$1
+
+    local deposit_ids=$(sqlite3 "${database_path}" "SELECT id FROM deposit WHERE cassete_id = \"${cassette_id}\";")
+    local validation_id=$(sqlite3 "${database_path}" "SELECT MAX(id) FROM deposit WHERE cassete_id = \"${cassette_id}\";")
+
+    if [ -z ${validation_id} ]
+    then
+        echo "Couldn't find deposits by cassette id ${cassette_id} in \"${database_path}\"."
+    else
+
+        local cassette_sum=0
+        local cassette_bills_amount=0
+
+        for i in $deposit_ids
+        do
+            deposit_sum=$(sqlite3 "${database_path}" "SELECT SUM(value) FROM bills WHERE deposit_id = \"${i}\";")
+            if [ ! -z ${deposit_sum} ]
+            then
+                cassette_sum=$(($cassette_sum+$deposit_sum))
+                operand=$(sqlite3 "${database_path}" "SELECT COUNT(id) FROM bills WHERE deposit_id = \"${i}\";")
+                cassette_bills_amount=$(($cassette_bills_amount+$operand))
+            else
+                echo "Deposit ${i} is registered but empty."
+            fi
+        done
+
+        echo "Sum of cash: ${cassette_sum}"
+        echo "Amount of bills: ${cassette_bills_amount}"
+        exit_condition=true
+    fi
+}
+
+if [ ! -f "$database_path" ]
+then
+    echo "Missing \"${database_path}\" database."
+    exit 0
+fi
+
+while [ "$exit_condition" == false ]
+do
+    echo "Show information from the last registered cassette? (y/n)"
+    read input_char
+    if [ "$input_char" == "y" ] || [ "$input_char" == "Y" ]
+    then
+        printLastCassette
+    elif [ "$input_char" == "n" ] || [ "$input_char" == "N" ]
+    then
+        echo "Input a specific cassette id: "
+        read cassette_id
+        printCassetteById $cassette_id
+    else
+        echo "Please input y to show information about the last active cassette or n to specify a cassette id on your own."
+        echo " "
+    fi
+done
diff --git a/shell/xinputmapping.sh b/shell/xinputmapping.sh
new file mode 100644
index 0000000..fd726c6
--- /dev/null
+++ b/shell/xinputmapping.sh
@@ -0,0 +1,15 @@
+#!/bin/bash
+
+# Gathering IDs of input devices
+pointer_ids="$(xinput --list | sed -n '/slave.*pointer/p' | cut -f 2 -d '=' | cut -f 1 -d '[')"
+# Get all outputs except for eDP1
+connected_screens="$(xrandr | grep connected | sed '/disconnected/d' | sed '/eDP1/d' | cut -f 1 -d ' ')"
+# Trying to map all the input devices to all the outputs
+for screen in $connected_screens; do
+    for pointer in $pointer_ids; do
+        /usr/bin/xterm -e /bin/bash -c "/usr/bin/xinput --map-to-output $pointer $screen"
+        /usr/bin/xterm -e /bin/bash -c "/usr/bin/xinput set-prop $pointer --type=float 'Coordinate Transformation Matrix' 1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0"
+        /usr/bin/xterm -e /bin/bash -c "/usr/bin/xinput list-props $pointer > /tmp/input-xinput-listprops.log 2>/tmp/input-xinput-listprops.err.log"
+        /usr/bin/xterm -e /bin/bash -c "/usr/bin/xinput list > /tmp/xinput.log"
+    done
+done
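+
+# Usage sketch (assumptions: a running X session with xinput/xrandr available,
+# and xterm installed, since each call above is wrapped in /usr/bin/xterm):
+#   ./xinputmapping.sh
+# Maps every slave pointer onto every connected output except eDP1 and dumps
+# diagnostics to /tmp/input-xinput-listprops.log and /tmp/xinput.log.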