Merge upstream new version

Commit 20fb7f9bc2 by Ceda EI, 2021-08-25 00:16:57 +05:30
20 changed files with 1999 additions and 273 deletions

.dockerignore (new file, 6 lines)

@@ -0,0 +1,6 @@
npm-debug.log
node_modules
*.swp
*.swo
data
*.DS_Store

Dockerfile (new file, 63 lines)

@@ -0,0 +1,63 @@
FROM node:14.8.0-stretch
RUN mkdir -p /usr/src/app && \
chown node:node /usr/src/app
USER node:node
WORKDIR /usr/src/app
COPY --chown=node:node . .
RUN npm install && \
npm install redis@0.8.1 && \
npm install pg@4.1.1 && \
npm install memcached@2.2.2 && \
npm install aws-sdk@2.738.0 && \
npm install rethinkdbdash@2.3.31
ENV STORAGE_TYPE=memcached \
STORAGE_HOST=127.0.0.1 \
STORAGE_PORT=11211 \
STORAGE_EXPIRE_SECONDS=2592000 \
STORAGE_DB=2 \
STORAGE_AWS_BUCKET= \
STORAGE_AWS_REGION= \
STORAGE_USERNAME= \
STORAGE_PASSWORD= \
STORAGE_FILEPATH=
ENV LOGGING_LEVEL=verbose \
LOGGING_TYPE=Console \
LOGGING_COLORIZE=true
ENV HOST=0.0.0.0 \
PORT=7777 \
KEY_LENGTH=10 \
MAX_LENGTH=400000 \
STATIC_MAX_AGE=86400 \
RECOMPRESS_STATIC_ASSETS=true
ENV KEYGENERATOR_TYPE=phonetic \
KEYGENERATOR_KEYSPACE=
ENV RATELIMITS_NORMAL_TOTAL_REQUESTS=500 \
RATELIMITS_NORMAL_EVERY_MILLISECONDS=60000 \
RATELIMITS_WHITELIST_TOTAL_REQUESTS= \
RATELIMITS_WHITELIST_EVERY_MILLISECONDS= \
# comma separated list for the whitelist \
RATELIMITS_WHITELIST=example1.whitelist,example2.whitelist \
\
RATELIMITS_BLACKLIST_TOTAL_REQUESTS= \
RATELIMITS_BLACKLIST_EVERY_MILLISECONDS= \
# comma separated list for the blacklist \
RATELIMITS_BLACKLIST=example1.blacklist,example2.blacklist
ENV DOCUMENTS=about=./about.md
EXPOSE ${PORT}
STOPSIGNAL SIGINT
ENTRYPOINT [ "bash", "docker-entrypoint.sh" ]
HEALTHCHECK --interval=30s --timeout=30s --start-period=5s \
--retries=3 CMD curl -f http://localhost:${PORT} || exit 1
CMD ["npm", "start"]

README.md (160 lines changed)

@@ -31,7 +31,7 @@ STDOUT. Check the README there for more details and usages.
1. Download the package, and expand it
2. Explore the settings inside of config.js, but the defaults should be good
3. `npm install`
-4. `npm start`
+4. `npm start` (you may specify an optional `<config-path>` as well)

## Settings

@@ -52,7 +52,7 @@ STDOUT. Check the README there for more details and usages.
When present, the `rateLimits` option enables built-in rate limiting courtesy
of `connect-ratelimit`. Any of the options supported by that library can be
-used and set in `config.json`.
+used and set in `config.js`.

See the README for [connect-ratelimit](https://github.com/dharmafly/connect-ratelimit)
for more information!
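Such a section might look like this in config.js; a sketch following the connect-ratelimit option shape (the values shown are illustrative):

``` json
{
  "rateLimits": {
    "categories": {
      "normal": {
        "totalRequests": 500,
        "every": 60000
      }
    }
  }
}
```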
@@ -154,6 +154,28 @@ or post.
All of which are optional except `type` with very logical default values.
### MongoDB
To use MongoDB storage you must install the `mongodb` package via npm:
`npm install mongodb`
Once you've done that, your config section should look like:
``` json
{
"type": "mongodb",
"connectionUrl": "mongodb://localhost:27017/database"
}
```
You can also just set the `DATABASE_URL` environment variable to your database connection URL.
Unlike with postgres, you do NOT have to create the table in your mongo database prior to running.
You can also set an `expire` option to the number of seconds to expire keys in.
This is off by default, but will constantly kick back expirations on each view or post.
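A minimal sketch of a MongoDB config section with expiry enabled (the 600-second value is purely illustrative):

``` json
{
  "type": "mongodb",
  "connectionUrl": "mongodb://localhost:27017/database",
  "expire": 600
}
```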
### Memcached

To use memcache storage you must install the `memcached` package via npm

@@ -198,6 +220,140 @@ Also, you must create an `uploads` table, which will store all the data for uplo
You can optionally add the `user` and `password` properties to use a user system.
### Google Datastore
To use the Google Datastore storage system, you must install the `@google-cloud/datastore` package via npm
`npm install @google-cloud/datastore`
Once you've done that, your config section should look like this:
``` json
{
"type": "google-datastore"
}
```
Authentication is handled automatically by [Google Cloud service account credentials](https://cloud.google.com/docs/authentication/getting-started), by providing authentication details through the `GOOGLE_APPLICATION_CREDENTIALS` environment variable.
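For example, credentials could be supplied like this (the key-file path is an illustrative placeholder):

```bash
# the path below is a placeholder for your own service-account key file
export GOOGLE_APPLICATION_CREDENTIALS="/path/to/service-account-key.json"
npm start
```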
### Amazon S3
To use [Amazon S3](https://aws.amazon.com/s3/) as a storage system, you must
install the `aws-sdk` package via npm:
`npm install aws-sdk`
Once you've done that, your config section should look like this:
```json
{
"type": "amazon-s3",
"bucket": "your-bucket-name",
"region": "us-east-1"
}
```
Authentication is handled automatically by the client. Check
[Amazon's documentation](https://docs.aws.amazon.com/sdk-for-javascript/v2/developer-guide/setting-credentials-node.html)
for more information. You will need to grant your role the following permissions on
your bucket:
```json
{
"Version": "2012-10-17",
"Statement": [
{
"Action": [
"s3:GetObject",
"s3:PutObject"
],
"Effect": "Allow",
"Resource": "arn:aws:s3:::your-bucket-name-goes-here/*"
}
]
}
```
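Alternatively, the SDK also reads the standard AWS environment variables; a sketch (the values are placeholders, substitute your own IAM keys):

```bash
# placeholder credentials: replace with your own
export AWS_ACCESS_KEY_ID="your-access-key-id"
export AWS_SECRET_ACCESS_KEY="your-secret-access-key"
npm start
```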
## Docker
### Build image
```bash
docker build --tag haste-server .
```
### Run container
For this example, we will run haste-server and connect it to a redis server:
```bash
docker run --name haste-server-container --env STORAGE_TYPE=redis --env STORAGE_HOST=redis-server --env STORAGE_PORT=6379 haste-server
```
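Note that `redis-server` must be resolvable from inside the container, and the command above does not publish a port. A sketch using a user-defined Docker network (container and network names are illustrative):

```bash
# names below are illustrative
docker network create haste-net
docker run --name redis-server --network haste-net -d redis
docker run --name haste-server-container --network haste-net \
  --env STORAGE_TYPE=redis --env STORAGE_HOST=redis-server --env STORAGE_PORT=6379 \
  -p 7777:7777 haste-server
```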
### Use docker-compose example
There is an example `docker-compose.yaml` which runs haste-server together with memcached:
```bash
docker-compose up
```
### Configuration
The Docker image is configured using environment variables, as shown in the example above.
Here is a list of all the environment variables:
### Storage
| Name | Default value | Description |
| :--------------------: | :-----------: | :-----------------------------------------------------------------------------------------------------------: |
| STORAGE_TYPE | memcached | Type of storage. Accepted values: "memcached", "redis", "postgres", "rethinkdb", "amazon-s3", and "file" |
| STORAGE_HOST | 127.0.0.1 | Storage host. Applicable for types: memcached, redis, postgres, and rethinkdb |
| STORAGE_PORT | 11211 | Port on the storage host. Applicable for types: memcached, redis, postgres, and rethinkdb |
| STORAGE_EXPIRE_SECONDS | 2592000 | Number of seconds to expire keys in. Sets the store's `expire` option. Applicable for types: redis, postgres, and memcached |
| STORAGE_DB | 2 | The name of the database. Applicable for redis, postgres, and rethinkdb |
| STORAGE_PASSWORD | | Password for the database. Applicable for redis, postgres, and rethinkdb |
| STORAGE_USERNAME | | Database username. Applicable for postgres and rethinkdb |
| STORAGE_AWS_BUCKET | | Applicable for amazon-s3. This is the name of the S3 bucket |
| STORAGE_AWS_REGION | | Applicable for amazon-s3. The region in which the bucket is located |
| STORAGE_FILEPATH | | Path to file to save data to. Applicable for type file |
### Logging
| Name | Default value | Description |
| :---------------: | :-----------: | :---------: |
| LOGGING_LEVEL | verbose | |
| LOGGING_TYPE | Console | |
| LOGGING_COLORIZE | true | |
### Basics
| Name | Default value | Description |
| :----------------------: | :--------------: | :---------------------------------------------------------------------------------------: |
| HOST | 0.0.0.0 | The hostname which the server answers on |
| PORT | 7777 | The port on which the server is running |
| KEY_LENGTH | 10 | The length of the keys to use |
| MAX_LENGTH | 400000 | Maximum length of a paste |
| STATIC_MAX_AGE | 86400 | Max age for static assets |
| RECOMPRESS_STATIC_ASSETS | true | Whether or not to compile static JS assets |
| KEYGENERATOR_TYPE | phonetic | Type of key generator. Acceptable values: "phonetic", or "random" |
| KEYGENERATOR_KEYSPACE | | keySpace argument is a string of acceptable characters |
| DOCUMENTS | about=./about.md | Comma separated list of static documents to serve, e.g. about=./about.md,home=./home.md |
### Rate limits
| Name | Default value | Description |
| :----------------------------------: | :-----------------------------------: | :--------------------------------------------------------------------------------------: |
| RATELIMITS_NORMAL_TOTAL_REQUESTS | 500 | By default anyone uncategorized will be subject to 500 requests in the defined timespan. |
| RATELIMITS_NORMAL_EVERY_MILLISECONDS | 60000 | The timespan to allow the total requests for uncategorized users |
| RATELIMITS_WHITELIST_TOTAL_REQUESTS | | By default client names in the whitelist will not have their requests limited. |
| RATELIMITS_WHITELIST_EVERY_MILLISECONDS | | By default client names in the whitelist will not have their requests limited. |
| RATELIMITS_WHITELIST | example1.whitelist,example2.whitelist | Comma separated list of the clients which are in the whitelist pool |
| RATELIMITS_BLACKLIST_TOTAL_REQUESTS | | By default client names in the blacklist will be subject to 0 requests per hour. |
| RATELIMITS_BLACKLIST_EVERY_MILLISECONDS | | By default client names in the blacklist will be subject to 0 requests per hour. |
| RATELIMITS_BLACKLIST | example1.blacklist,example2.blacklist | Comma separated list of the clients which are in the blacklist pool. |
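As a sketch, the normal category could be tightened to 100 requests per minute through the container environment (the values are illustrative):

```bash
docker run --env RATELIMITS_NORMAL_TOTAL_REQUESTS=100 \
  --env RATELIMITS_NORMAL_EVERY_MILLISECONDS=60000 \
  -p 7777:7777 haste-server
```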
## Author

John Crepezzi <john.crepezzi@gmail.com>

docker-compose.yaml (new file, 19 lines)

@@ -0,0 +1,19 @@
version: '3.0'
services:
haste-server:
build: .
networks:
- db-network
environment:
- STORAGE_TYPE=memcached
- STORAGE_HOST=memcached
- STORAGE_PORT=11211
ports:
- 7777:7777
memcached:
image: memcached:latest
networks:
- db-network
networks:
db-network:

docker-entrypoint.js (new file, 108 lines)

@@ -0,0 +1,108 @@
const {
HOST,
PORT,
KEY_LENGTH,
MAX_LENGTH,
STATIC_MAX_AGE,
RECOMPRESS_STATIC_ASSETS,
STORAGE_TYPE,
STORAGE_HOST,
STORAGE_PORT,
STORAGE_EXPIRE_SECONDS,
STORAGE_DB,
STORAGE_AWS_BUCKET,
STORAGE_AWS_REGION,
STORAGE_PASSWORD,
STORAGE_USERNAME,
STORAGE_FILEPATH,
LOGGING_LEVEL,
LOGGING_TYPE,
LOGGING_COLORIZE,
KEYGENERATOR_TYPE,
KEYGENERATOR_KEYSPACE,
RATELIMITS_NORMAL_TOTAL_REQUESTS,
RATELIMITS_NORMAL_EVERY_MILLISECONDS,
RATELIMITS_WHITELIST_TOTAL_REQUESTS,
RATELIMITS_WHITELIST_EVERY_MILLISECONDS,
RATELIMITS_WHITELIST,
RATELIMITS_BLACKLIST_TOTAL_REQUESTS,
RATELIMITS_BLACKLIST_EVERY_MILLISECONDS,
RATELIMITS_BLACKLIST,
DOCUMENTS,
} = process.env;
const config = {
host: HOST,
port: PORT,
keyLength: KEY_LENGTH,
maxLength: MAX_LENGTH,
staticMaxAge: STATIC_MAX_AGE,
recompressStaticAssets: RECOMPRESS_STATIC_ASSETS,
logging: [
{
level: LOGGING_LEVEL,
type: LOGGING_TYPE,
colorize: LOGGING_COLORIZE,
},
],
keyGenerator: {
type: KEYGENERATOR_TYPE,
keyspace: KEYGENERATOR_KEYSPACE,
},
rateLimits: {
whitelist: RATELIMITS_WHITELIST ? RATELIMITS_WHITELIST.split(",") : [],
blacklist: RATELIMITS_BLACKLIST ? RATELIMITS_BLACKLIST.split(",") : [],
categories: {
normal: {
totalRequests: RATELIMITS_NORMAL_TOTAL_REQUESTS,
every: RATELIMITS_NORMAL_EVERY_MILLISECONDS,
},
whitelist:
RATELIMITS_WHITELIST_EVERY_MILLISECONDS ||
RATELIMITS_WHITELIST_TOTAL_REQUESTS
? {
totalRequests: RATELIMITS_WHITELIST_TOTAL_REQUESTS,
every: RATELIMITS_WHITELIST_EVERY_MILLISECONDS,
}
: null,
blacklist:
RATELIMITS_BLACKLIST_EVERY_MILLISECONDS ||
RATELIMITS_BLACKLIST_TOTAL_REQUESTS
? {
totalRequests: RATELIMITS_BLACKLIST_TOTAL_REQUESTS,
every: RATELIMITS_BLACKLIST_EVERY_MILLISECONDS,
}
: null,
},
},
storage: {
type: STORAGE_TYPE,
host: STORAGE_HOST,
port: STORAGE_PORT,
expire: STORAGE_EXPIRE_SECONDS,
bucket: STORAGE_AWS_BUCKET,
region: STORAGE_AWS_REGION,
connectionUrl: `postgres://${STORAGE_USERNAME}:${STORAGE_PASSWORD}@${STORAGE_HOST}:${STORAGE_PORT}/${STORAGE_DB}`,
db: STORAGE_DB,
user: STORAGE_USERNAME,
password: STORAGE_PASSWORD,
path: STORAGE_FILEPATH,
},
documents: DOCUMENTS
? DOCUMENTS.split(",").reduce((acc, item) => {
const keyAndValueArray = item.replace(/\s/g, "").split("=");
return { ...acc, [keyAndValueArray[0]]: keyAndValueArray[1] };
}, {})
: null,
};
console.log(JSON.stringify(config));
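A quick way to sanity-check the `DOCUMENTS` parsing above (a sketch; assumes node on PATH, file names illustrative):

```bash
# prints the generated config JSON to stdout
DOCUMENTS=about=./about.md,home=./home.md node ./docker-entrypoint.js
# the output should contain: "documents":{"about":"./about.md","home":"./home.md"}
```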

docker-entrypoint.sh (new file, 9 lines)

@@ -0,0 +1,9 @@
#!/bin/bash
# We use this file to translate environment variables into the config.js used by the application
set -e
node ./docker-entrypoint.js > ./config.js
exec "$@"
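Inside the image this script is the ENTRYPOINT, so the `npm start` from CMD arrives as `$@`. The same flow can be reproduced outside Docker (a sketch; assumes bash and node are available):

```bash
# writes config.js from the environment, then starts the server
# note: this overwrites ./config.js in the current directory
STORAGE_TYPE=memcached STORAGE_HOST=127.0.0.1 bash docker-entrypoint.sh npm start
```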

lib/document_handler.js

@@ -16,33 +16,55 @@ var DocumentHandler = function(options) {
DocumentHandler.defaultKeyLength = 10;

// Handle retrieving a document
-DocumentHandler.prototype.handleGet = function(key, response, skipExpire) {
+DocumentHandler.prototype.handleGet = function(request, response, config) {
+const key = request.params.id.split('.')[0];
+const skipExpire = !!config.documents[key];
this.store.get(key, function(ret) {
if (ret) {
winston.verbose('retrieved document', { key: key });
response.writeHead(200, { 'content-type': 'application/json' });
-response.end(JSON.stringify({ data: ret, key: key }));
+if (request.method === 'HEAD') {
+response.end();
+} else {
+response.end(JSON.stringify({ data: ret, key: key }));
+}
}
else {
winston.warn('document not found', { key: key });
response.writeHead(404, { 'content-type': 'application/json' });
-response.end(JSON.stringify({ message: 'Document not found.' }));
+if (request.method === 'HEAD') {
+response.end();
+} else {
+response.end(JSON.stringify({ message: 'Document not found.' }));
+}
}
}, skipExpire);
};

// Handle retrieving the raw version of a document
-DocumentHandler.prototype.handleRawGet = function(key, response, skipExpire) {
+DocumentHandler.prototype.handleRawGet = function(request, response, config) {
+const key = request.params.id.split('.')[0];
+const skipExpire = !!config.documents[key];
this.store.get(key, function(ret) {
if (ret) {
winston.verbose('retrieved raw document', { key: key });
response.writeHead(200, { 'content-type': 'text/plain; charset=UTF-8' });
-response.end(ret);
+if (request.method === 'HEAD') {
+response.end();
+} else {
+response.end(ret);
+}
}
else {
winston.warn('raw document not found', { key: key });
response.writeHead(404, { 'content-type': 'application/json' });
-response.end(JSON.stringify({ message: 'Document not found.' }));
+if (request.method === 'HEAD') {
+response.end();
+} else {
+response.end(JSON.stringify({ message: 'Document not found.' }));
+}
}
}, skipExpire);
};

lib/document_stores/amazon-s3.js (new file, 56 lines)

@@ -0,0 +1,56 @@
/*global require,module,process*/
var AWS = require('aws-sdk');
var winston = require('winston');
var AmazonS3DocumentStore = function(options) {
this.expire = options.expire;
this.bucket = options.bucket;
this.client = new AWS.S3({region: options.region});
};
AmazonS3DocumentStore.prototype.get = function(key, callback, skipExpire) {
var _this = this;
var req = {
Bucket: _this.bucket,
Key: key
};
_this.client.getObject(req, function(err, data) {
if(err) {
callback(false);
}
else {
callback(data.Body.toString('utf-8'));
if (_this.expire && !skipExpire) {
winston.warn('amazon s3 store cannot set expirations on keys');
}
}
});
};
AmazonS3DocumentStore.prototype.set = function(key, data, callback, skipExpire) {
var _this = this;
var req = {
Bucket: _this.bucket,
Key: key,
Body: data,
ContentType: 'text/plain'
};
_this.client.putObject(req, function(err, data) {
if (err) {
callback(false);
}
else {
callback(true);
if (_this.expire && !skipExpire) {
winston.warn('amazon s3 store cannot set expirations on keys');
}
}
});
};
module.exports = AmazonS3DocumentStore;

lib/document_stores/google-datastore.js (new file, 89 lines)

@@ -0,0 +1,89 @@
/*global require,module,process*/
const Datastore = require('@google-cloud/datastore');
const winston = require('winston');
class GoogleDatastoreDocumentStore {
// Create a new store with options
constructor(options) {
this.kind = "Haste";
this.expire = options.expire;
this.datastore = new Datastore();
}
// Save file in a key
set(key, data, callback, skipExpire) {
var expireTime = (skipExpire || this.expire === undefined) ? null : new Date(Date.now() + this.expire * 1000);
var taskKey = this.datastore.key([this.kind, key]);
var task = {
key: taskKey,
data: [
{
name: 'value',
value: data,
excludeFromIndexes: true
},
{
name: 'expiration',
value: expireTime
}
]
};
this.datastore.insert(task).then(() => {
callback(true);
})
.catch(err => {
callback(false);
});
}
// Get a file from a key
get(key, callback, skipExpire) {
var taskKey = this.datastore.key([this.kind, key]);
this.datastore.get(taskKey).then((entity) => {
if (skipExpire || entity[0]["expiration"] == null) {
callback(entity[0]["value"]);
}
else {
// check for expiry
if (entity[0]["expiration"] < new Date()) {
winston.info("document expired", {key: key, expiration: entity[0]["expiration"], check: new Date()});
callback(false);
}
else {
// update expiry
var task = {
key: taskKey,
data: [
{
name: 'value',
value: entity[0]["value"],
excludeFromIndexes: true
},
{
name: 'expiration',
value: new Date(Date.now() + this.expire * 1000)
}
]
};
this.datastore.update(task).then(() => {
})
.catch(err => {
winston.error("failed to update expiration", {error: err});
});
callback(entity[0]["value"]);
}
}
})
.catch(err => {
winston.error("Error retrieving value from Google Datastore", {error: err});
callback(false);
});
}
}
module.exports = GoogleDatastoreDocumentStore;

lib/document_stores/memcached.js

@@ -26,7 +26,7 @@ class MemcachedDocumentStore {
// Save file in a key
set(key, data, callback, skipExpire) {
-this.client.set(key, data, skipExpire ? 0 : this.expire, (error) => {
+this.client.set(key, data, skipExpire ? 0 : this.expire || 0, (error) => {
callback(!error);
});
}

@@ -34,10 +34,12 @@ class MemcachedDocumentStore {
// Get a file from a key
get(key, callback, skipExpire) {
this.client.get(key, (error, data) => {
-callback(error ? false : data);
+const value = error ? false : data;
+callback(value);
// Update the key so that the expiration is pushed forward
-if (!skipExpire) {
+if (value && !skipExpire) {
this.set(key, data, (updateSucceeded) => {
if (!updateSucceeded) {
winston.error('failed to update expiration on GET', {key});

lib/document_stores/mongo.js (new file, 88 lines)

@@ -0,0 +1,88 @@
var MongoClient = require('mongodb').MongoClient,
winston = require('winston');
var MongoDocumentStore = function (options) {
this.expire = options.expire;
this.connectionUrl = process.env.DATABASE_URL || options.connectionUrl;
};
MongoDocumentStore.prototype.set = function (key, data, callback, skipExpire) {
var now = Math.floor(new Date().getTime() / 1000),
that = this;
this.safeConnect(function (err, db) {
if (err)
return callback(false);
db.collection('entries').update({
'entry_id': key,
$or: [
{ expiration: -1 },
{ expiration: { $gt: now } }
]
}, {
'entry_id': key,
'value': data,
'expiration': that.expire && !skipExpire ? that.expire + now : -1
}, {
upsert: true
}, function (err, existing) {
if (err) {
winston.error('error persisting value to mongodb', { error: err });
return callback(false);
}
callback(true);
});
});
};
MongoDocumentStore.prototype.get = function (key, callback, skipExpire) {
var now = Math.floor(new Date().getTime() / 1000),
that = this;
this.safeConnect(function (err, db) {
if (err)
return callback(false);
db.collection('entries').findOne({
'entry_id': key,
$or: [
{ expiration: -1 },
{ expiration: { $gt: now } }
]
}, function (err, entry) {
if (err) {
winston.error('error retrieving value from mongodb', { error: err });
return callback(false);
}
callback(entry === null ? false : entry.value);
if (entry !== null && entry.expiration !== -1 && that.expire && !skipExpire) {
db.collection('entries').update({
'entry_id': key
}, {
$set: {
'expiration': that.expire + now
}
}, function (err, result) { });
}
});
});
};
MongoDocumentStore.prototype.safeConnect = function (callback) {
MongoClient.connect(this.connectionUrl, function (err, db) {
if (err) {
winston.error('error connecting to mongodb', { error: err });
callback(err);
} else {
callback(undefined, db);
}
});
};
module.exports = MongoDocumentStore;

lib/document_stores/postgres.js

@@ -1,14 +1,16 @@
/*global require,module,process*/

-var postgres = require('pg');
var winston = require('winston');
+const {Pool} = require('pg');

// create table entries (id serial primary key, key varchar(255) not null, value text not null, expiration int, unique(key));

// A postgres document store
var PostgresDocumentStore = function (options) {
this.expireJS = options.expire;
-this.connectionUrl = process.env.DATABASE_URL || options.connectionUrl;
+const connectionString = process.env.DATABASE_URL || options.connectionUrl;
+this.pool = new Pool({connectionString});
};

PostgresDocumentStore.prototype = {

@@ -64,16 +66,15 @@ PostgresDocumentStore.prototype = {
// A connection wrapper
safeConnect: function (callback) {
-postgres.connect(this.connectionUrl, function (err, client, done) {
-if (err) {
-winston.error('error connecting to postgres', { error: err });
-callback(err);
+this.pool.connect((error, client, done) => {
+if (error) {
+winston.error('error connecting to postgres', {error});
+callback(error);
} else {
callback(undefined, client, done);
}
});
}
};

module.exports = PostgresDocumentStore;

package-lock.json (generated, 1558 lines changed)

File diff suppressed because it is too large.

package.json

@@ -14,25 +14,21 @@
},
"main": "haste",
"dependencies": {
+"busboy": "0.2.4",
+"connect": "^3.7.0",
"connect-ratelimit": "0.0.7",
"connect-route": "0.1.5",
-"connect": "3.4.1",
-"st": "1.1.0",
-"winston": "0.6.2",
-"redis-url": "0.1.0",
+"pg": "^8.0.0",
"redis": "0.8.1",
+"redis-url": "0.1.0",
+"st": "^2.0.0",
"uglify-js": "3.1.6",
-"busboy": "0.2.4",
-"pg": "4.1.1"
+"winston": "^2.0.0"
},
"devDependencies": {
-"mocha": "^4.0.1"
+"mocha": "^8.1.3"
},
"bundledDependencies": [],
-"engines": {
-"node": "8.1.4",
-"npm": "5.2.0"
-},
"bin": {
"haste-server": "./server.js"
},

config.js

@@ -33,10 +33,7 @@
},

"storage": {
-"type": "memcached",
-"host": "127.0.0.1",
-"port": 11211,
-"expire": 2592000
+"type": "file"
},

"documents": {

server.js

@@ -108,9 +108,7 @@ if (config.rateLimits) {
}

function raw(request, response) {
-var key = request.params.id.split('.')[0];
-var skipExpire = !!config.documents[key];
-return documentHandler.handleRawGet(key, response, skipExpire);
+return documentHandler.handleRawGet(request, response, config);
}

// first look at API calls
@@ -118,15 +116,21 @@ app.use(route(function(router) {
// get raw documents - support getting with extension
router.get('/raw/:id', raw);
router.get('/:id/raw', raw);
+router.head('/raw/:id', raw);
+router.head('/:id/raw', raw);

// add documents
router.post('/documents', function(request, response) {
return documentHandler.handlePost(request, response);
});

// get documents
router.get('/documents/:id', function(request, response) {
-var key = request.params.id.split('.')[0];
-var skipExpire = !!config.documents[key];
-return documentHandler.handleGet(key, response, skipExpire);
+return documentHandler.handleGet(request, response, config);
+});
+
+router.head('/documents/:id', function(request, response) {
+return documentHandler.handleGet(request, response, config);
});
}));
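Once a server is running, the new HEAD routes can be exercised with curl, which sends a HEAD request for `-I` (a sketch against a local instance; the document key is illustrative):

```bash
curl -I http://localhost:7777/documents/about
curl -I http://localhost:7777/raw/about
```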

static/application.css

@@ -17,6 +17,8 @@ textarea {
outline: none;
resize: none;
font-size: 13px;
+margin-top: 0;
+margin-bottom: 0;
}

/* the line numbers */

@@ -31,6 +33,7 @@ textarea {
font-size: 13px;
font-family: monospace;
text-align: right;
+user-select: none;
}

/* code box when locked */

@@ -123,6 +126,7 @@ a.logo:hover {
font-size: 12px;
line-height: 14px;
padding: 10px 15px;
+user-select: none;
}

#box3 .label {

static/application.js

@@ -64,7 +64,7 @@ haste_document.prototype.save = function(data, callback) {
type: 'post',
data: data,
dataType: 'json',
-contentType: 'application/json; charset=utf-8',
+contentType: 'text/plain; charset=utf-8',
success: function(res) {
_this.locked = true;
_this.key = res.key;

@@ -170,8 +170,7 @@ haste.extensionMap = {
lua: 'lua', pas: 'delphi', java: 'java', cpp: 'cpp', cc: 'cpp', m: 'objectivec',
vala: 'vala', sql: 'sql', sm: 'smalltalk', lisp: 'lisp', ini: 'ini',
diff: 'diff', bash: 'bash', sh: 'bash', tex: 'tex', erl: 'erlang', hs: 'haskell',
-md: 'markdown', txt: '', coffee: 'coffee', json: 'javascript',
-swift: 'swift'
+md: 'markdown', txt: '', coffee: 'coffee', swift: 'swift'
};

// Look up the extension preferred for a type

Two further file diffs suppressed because one or more lines are too long.