Add entries into JSON - python

I am working with an API that doesn't have all the information I need in a single call, and I need to add the project code it came from into the call that I am making. Right now it appends the project data to the list, but I really need it to be part of the original call. Here is my output now:
[{"committer_email": "justin.m.boucher#example.com", "short_id": "981147b9", "title": "Added .gitignore", "author_email": "justin.m.boucher#example.com", "authored_date": "2017-08-29T08:31:11.000-07:00", "created_at": "2017-08-29T08:31:11.000-07:00", "author_name": "Justin Boucher", "parent_ids": [], "committed_date": "2017-08-29T08:31:11.000-07:00", "message": "Added .gitignore\n", "committer_name": "Justin Boucher", "id": "981147b905913a60796283ce10f915c53679df49"}, {"project_id": "2"}]
Here is the output I want to achieve:
[{"project_id": "2", "committer_email": "justin.m.boucher#example.com", "short_id": "981147b9", "title": "Added .gitignore", "author_email": "justin.m.boucher#example.com", "authored_date": "2017-08-29T08:31:11.000-07:00", "created_at": "2017-08-29T08:31:11.000-07:00", "author_name": "Justin Boucher", "parent_ids": [], "committed_date": "2017-08-29T08:31:11.000-07:00", "message": "Added .gitignore\n", "committer_name": "Justin Boucher", "id": "981147b905913a60796283ce10f915c53679df49"}]
Here is my code so far:
get_commits.py:
import gitlab
import json

gitlab = gitlab.Gitlab()
projects = gitlab.getProjectID()

for i in projects:
    api_str = '/projects/' + str(i) + '/repository/commits'
    connect = gitlab.connectAPI(apiCall=api_str)
    data = json.dumps(connect)
    # Append project id to json, since it isn't created
    # in the commits from Gitlab
    commit = json.loads(data)
    commit.append({'project_id': str(i)})
    # make it pretty again for Splunk to read
    commit = json.dumps(commit)
    print commit
gitlab.py
import os
import ConfigParser
import requests
import json

# Setup Splunk Environment
APPNAME = 'splunk_gitlab'
CONFIG = 'appconfig.conf'
SPLUNK_HOME = os.environ['SPLUNK_HOME']
parser = ConfigParser.SafeConfigParser()

class Gitlab():
    # # Load Settings
    # parser.read(SPLUNK_HOME + '/etc/apps/' + APPNAME + '/local/' + CONFIG)
    # if parser.has_section('Authentication'):
    #     pass
    # else:
    #     parser.read(SPLUNK_HOME + '/etc/apps/' + APPNAME + '/default/' + CONFIG)
    #
    # GITLAB_URL = parser.get('Authentication', 'GITLAB_URL')
    # API_KEY = parser.get('Authentication', 'API_KEY')

    # Used for testing only
    GITLAB_URL = 'http://<my_address>'
    API_KEY = '<my_key>'
    API_SERVER = GITLAB_URL + '/api/v4'

    # Place api call to retrieve data
    def connectAPI(self, apiCall='/projects'):
        headers = {
            'PRIVATE-TOKEN': self.API_KEY
        }
        final_url = self.API_SERVER + apiCall
        resp = requests.get(final_url, headers=headers)
        status_code = resp.status_code
        resp = resp.json()
        if status_code == 200:
            return resp
        else:
            raise Exception("Something went wrong requesting (%s): %s" % (
                resp['errors'][0]['errorType'], resp['errors'][0]['message']))

    def getProjectID(self):
        connect = self.connectAPI(apiCall='/projects')
        data = json.dumps(connect)
        projects = json.loads(data)
        project_list = []
        for i in projects:
            project_list.append(i['id'])
        return project_list

If you want to add a new element to the first dictionary in the list instead of appending a new dictionary to the list, try using assignment instead of append.
commit[0]['project_id'] = str(i)
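
Applied to the loop in get_commits.py, a minimal sketch (assuming each call returns a list of commit dicts, as in the output shown above):

for i in projects:
    api_str = '/projects/' + str(i) + '/repository/commits'
    commits = gitlab.connectAPI(apiCall=api_str)
    # put the project id inside each commit dict instead of
    # appending a separate {'project_id': ...} entry to the list
    for commit in commits:
        commit['project_id'] = str(i)
    print json.dumps(commits)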


Payload from a defined list in Python

I am pretty new to python and I am trying to create a script that will pull data from a ticketing platform.
I got the list of agents and their ids but when I try to pull the data it's giving me this error:
KeyError: 'data'
Is there a way for me to have the "agents" parameter automatically update using the agent_id list?
Here is the code, I removed the links and the API key for privacy reasons:
import requests
import json
from cgitb import text
from openpyxl import Workbook
import openpyxl
import requests
from datetime import date
from datetime import timedelta
#Agents list
agents_list = ["Agent1", "Agent2", "Agent3"]
agent_id = []
agents_names = []
today = date.today()
yesterday = today - timedelta(days = 1)
start_date = str(yesterday)
end_date = str(yesterday)
def extragere_date_agenti():
    url = "https://x.gorgias.com/api/users?limit=100&order_by=name%3Aasc&roles=agent&roles=admin"
    headers = {
        "accept": "application/json",
        "authorization": "Basic"
    }
    response = requests.get(url, headers=headers)
    text_name_id = json.loads(response.text)
    for names in text_name_id["data"]:
        agent_name = names["firstname"]
        agents_id = names["id"]
        if agent_name in agents_list:
            agents_names.append(agent_name)
            agent_id.append(agents_id)

extragere_date_agenti()

def extragere_numere():
    url = "https://x.gorgias.com/api/stats/total-messages-sent"
    payload = {"filters": {
        "period": {
            "start_datetime": start_date + "T00:00:00-05:00",
            "end_datetime": end_date + "T23:59:59-05:00"
        },
        "agents": [agent_id], #This is the value that I want to modify
        "channels": ["email"]
    }}
    headers = {
        "accept": "application/json",
        "content-type": "application/json",
        "authorization": "Basic"
    }
    response = requests.post(url, json=payload, headers=headers)
    text_numere = json.loads(response.text)
    numere_finale = text_numere["data"]["data"]["value"]
    print(numere_finale)
I've tried to do a for loop but it's giving me the same error. Any suggestions?
First, add a condition to check the response status code.
Also, add another condition to prevent this type of KeyError:
if "data" in text_name_id:
Your error:
KeyError: 'data'
means that there is no key named "data" in text_name_id.
It is difficult to tell you how to fix it without more info...
Are you sure the request returns a positive status? I see no error handling; if response.status_code == 200: should be enough to check.
Are you sure the response JSON has a key named "data"? Try this to set a default if the key is missing:
text_name_id.get("data", [{"firstname": "error", "id": 0}])
--- Edit ---
Okay, is that the right one? I don't see an "id" or "firstname" key in it. But if it is the right JSON, then you can't iterate over a dict the way you did in Python.
To do so you would want to do this:
for key, value in text_name_id['data']['data'].items():
    ...

How to access information from config.json file to a python file?

The problem is that I'm unable to access the information from the config.json file in my Python file.
I have provided the JSON data and Python code below.
I have tried everything in the requests module; I can get the response without the config file, but I need it to work with the config file.
The following is a json file
{
    "api_data": {
        "request_url": "https://newapi.zivame.com/api/v1/catalog/list",
        "post_data": {"category_ids": "948",
                      "limit": "10000"},
        "my_headers": {"Content-Type": "application/json"}
    },
    "redshift": {
        "host": "XXX.XXXX.XXX",
        "user": "XXXX",
        "password": "XXXXXXXX",
        "port": 8080,
        "db": "XXXX"
    },
    "s3": {
        "access_key": "XXXXXXXXX",
        "secret_key": "XXXXXXXXXX",
        "region": "XX-XXXXX-1",
        "path": "XXXXXXXXXXXX/XXX",
        "table": "XXXXXX",
        "bucket": "XXXX",
        "file": "XXXXXX",
        "copy_column": "XXX",
        "local_path": "XXXXX"
    },
    "csv_file": {
        "promo_zivame": ""
    }
}
and this is the program
#!/usr/bin/python
import json
import psycopg2
import requests
import os

BASE_PATH = os.path.dirname(os.path.realpath(__file__))
with open(BASE_PATH + '/config.json') as json_data_file:
    data = json.load(json_data_file)

#api_config = data['api_data']
#redshift = data['redshift']
s3_config = data['s3']
#x = print(api_config.get('request_url'))

class ApiResponse:
    #api response
    def api_data(self, api_config):
        print("starting api_data")
        try:
            self.ApiResponse = requests.post(api_config['request_url'], api_config['post_data'], api_config['my_headers'])
            data_1 = self.ApiResponse
            #data = json.dump(self.ApiResponse)
            print("API Result Response")
            print(())
            print(self.ApiResponse)
            return (self.ApiResponse)
        except Exception:
            print("response not found")
            return False

    def redshift_connect(self, redshift):
        try:
            # Amazon Redshift connect string
            self.con = psycopg2.connect(
                host=redshift['host'],
                user=redshift['user'],
                port=redshift['port'],
                password=redshift['password'],
                dbname=redshift['db'])
            print(self.con)
            return self.con
        except Exception:
            print("Error in Redshift connection")
            return False

def main():
    c1 = ApiResponse()
    api_config = data['api_data']
    redshift = data['redshift']
    c1.api_data(api_config)
    c1.api_data(data)
    c1.redshift_connect(redshift)

if __name__ == '__main__':
    main()
The third positional argument to requests.post() is json, not headers. To provide headers, you need to use the argument name explicitly, as @JustinEzequiel suggested. See the requests doc here: 2.python-requests.org/en/v1.1.0/user/quickstart/#custom-headers
requests.post(api_config['request_url'], json=api_config['post_data'], headers=api_config['my_headers'])
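Applied to the api_data method in the question, the corrected call might look like this sketch (raise_for_status is optional but surfaces HTTP errors instead of silently returning a bad response):

def api_data(self, api_config):
    resp = requests.post(api_config['request_url'],
                         json=api_config['post_data'],
                         headers=api_config['my_headers'])
    resp.raise_for_status()  # raise on 4xx/5xx responses
    return resp.json()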
Borrowing code from https://stackoverflow.com/a/16696317/5386938
import requests

api_config = {
    "request_url": "https://newapi.zivame.com/api/v1/catalog/list",
    "post_data": {"category_ids": "948", "limit": "10000"},
    "my_headers": {"Content-Type": "application/json"}
}

local_filename = 'the_response.json'
with requests.post(api_config['request_url'], json=api_config['post_data'], headers=api_config['my_headers'], stream=True) as r:
    r.raise_for_status()
    with open(local_filename, 'wb') as f:
        for chunk in r.iter_content(chunk_size=8192):
            if chunk:  # filter out keep-alive new chunks
                f.write(chunk)
This saves the response into a file ('the_response.json') that you can then pass around. Note the stream=True passed to requests.post.
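To read the saved response back as a Python object later:

import json

with open('the_response.json') as f:
    catalog = json.load(f)  # 'catalog' is an arbitrary name for the parsed data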

Generate HMAC Sha256 in python 3

I'm writing code to verify an HMAC-authenticated incoming POST request with JSON to our API. The HMAC I received is OD5ZxL4tdGgWr78e9vO3cYrjuOFT8WOrTbTIuuIH1PQ=
When I try to generate it myself using Python, it is always different.
Here is the JSON request I received:
{
    "shipper_id": 4841,
    "status": "Cancelled",
    "shipper_ref_no": "",
    "tracking_ref_no": "",
    "shipper_order_ref_no": "",
    "timestamp": "2018-05-23T15:13:28+0800",
    "id": "61185ecf-3484-4985-b625-ffe30ba36e28",
    "previous_status": "Pending Pickup",
    "tracking_id": "NVSGBHINK000000001"
}
And the client secret is 817a3723917f4c7fac24b1f1b324bbab.
The HMAC secret I received is OD5ZxL4tdGgWr78e9vO3cYrjuOFT8WOrTbTIuuIH1PQ=.
Here is the code when I write it in PHP:
<?php
define('CLIENT_SECRET', 'my_shared_secret');

function verify_webhook($data, $hmac_header){
    $calculated_hmac = base64_encode(hash_hmac('sha256', $data, CLIENT_SECRET, true));
    return ($hmac_header == $calculated_hmac);
}

$hmac_header = $_SERVER['X-NINJAVAN-HMAC-SHA256'];
$data = file_get_contents('php://input');
$verified = verify_webhook($data, $hmac_header);
error_log('Webhook verified: '.var_export($verified, true)); //check error.log to see result
?>
But I have no idea how to do it in Python 3.
In Python 3 you basically want something like the following, taken from how you handle GitHub webhook requests.
import hashlib
import hmac

secret = 'CLIENT_SECRET'
data = rsp.content  # assumes you're using requests for data/sig
signature = rsp.headers['X-Something-Signature']
signature_computed = 'sha1=' + hmac.new(
    key=secret.encode('utf-8'),
    msg=data.encode('utf-8'),
    digestmod=hashlib.sha1
).hexdigest()
if not hmac.compare_digest(signature, signature_computed):
    log("Invalid payload")
If you want to recreate the hashing code from PHP in Python, do it thusly:

import base64
import hashlib
import hmac

def create_signature(key, data):
    sig_hash = hmac.new(key.encode('utf8'), data.encode('utf8'), hashlib.sha256).digest()
    base64_message = base64.b64encode(sig_hash).decode()
    return base64_message

This will create the signature that should match what your PHP code is creating. Just compare the signature to what is sent in the header.
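For the values in the question, a hypothetical check could look like this. Note that data must be the exact raw request body as received; re-serializing the JSON can reorder keys or change whitespace and produce a different digest:

raw_body = ...  # the unmodified body of the incoming POST request, as a str
received = 'OD5ZxL4tdGgWr78e9vO3cYrjuOFT8WOrTbTIuuIH1PQ='
computed = create_signature('817a3723917f4c7fac24b1f1b324bbab', raw_body)
print(hmac.compare_digest(received, computed))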
from collections import OrderedDict
import base64
import hashlib
import hmac
import json

import requests

params = OrderedDict()
params["shipper_id"] = 4841
params["status"] = "Cancelled"
params["shipper_ref_no"] = ""
params["tracking_ref_no"] = ""
params["shipper_order_ref_no"] = ""
params["timestamp"] = "2018-05-23T15:13:28+0800"
params["id"] = "61185ecf-3484-4985-b625-ffe30ba36e28"
params["previous_status"] = "Pending Pickup"
params["tracking_id"] = "NVSGBHINK000000001"

key = "817a3723917f4c7fac24b1f1b324bbab"  # the client secret from the question

# Serialize the payload. separators=(",", ":") strips whitespace;
# I'm not sure this reproduces the exact bytes the sender signed,
# so prefer hashing the raw request body when you have it.
message = json.dumps(params, separators=(",", ":"))
signature = base64.b64encode(
    hmac.new(key.encode("utf8"), message.encode("utf8"), hashlib.sha256).digest()
).decode()

params["signature"] = signature
r = requests.post(url, json=params)  # url: your API endpoint, as in the original
print(r.text)

how to pass json object directly to train in rasa nlu from python

I am using rasa nlu to train data. As per the documentation at http://nlu.rasa.ai/python.html, the following code has to be used to train data that exists in the file demo-rasa.json:
from rasa_nlu.converters import load_data
from rasa_nlu.config import RasaNLUConfig
from rasa_nlu.model import Trainer
training_data = load_data('data/examples/rasa/demo-rasa.json')
trainer = Trainer(RasaNLUConfig("sample_configs/config_spacy.json"))
trainer.train(training_data)
model_directory = trainer.persist('./projects/default/')
But instead how do we read data from a json object for training.
If you look at the implementation of load_data, it performs two steps:
guess the file format
load the file using the appropriate loading method
The simplest solution would be to write your json object into a file or StringIO object.
Alternatively, you could pick the specific loading function you need, for example load_rasa_data, and separate the file reading from it. For this example, you could probably just take the whole function and remove the line data = _read_json_from_file(filename).
I am somewhat surprised to see that currently there is no way to read an already loaded json object. If you decide to adapt the functions to this, you might consider writing a pull request for it.
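A minimal sketch of the temp-file route (load_data_from_obj is a hypothetical helper name; it assumes json_obj is already in the rasa_nlu training-data format):

import json
import tempfile

from rasa_nlu.converters import load_data

def load_data_from_obj(json_obj):
    # write the in-memory object to a temporary file so load_data
    # can guess the format and parse it as usual
    with tempfile.NamedTemporaryFile('w', suffix='.json', delete=False) as tmp:
        json.dump(json_obj, tmp)
    return load_data(tmp.name)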
I have made a Flask app which takes the JSON object from the request body instead of reading it from a file.
This code converts an existing LUIS json using spaCy for entities and sklearn-crfsuite for intent recognition.
from flask import Flask, jsonify, request
from flask_cors import CORS
import json, os, msvcrt, psutil, subprocess, datetime

app = Flask(__name__)
CORS(app)

with app.app_context():
    with app.test_request_context():

        #region REST based RASA API
        serverExecutablePID = 0
        hasAPIStarted = False
        configFileDirectory = "C:\\Code\\RasaAPI\\RASAResources\\config"
        chitChatModel = "ChitChat"
        assetsDirectory = "C:\\Code\\RasaAPI\\RASAResources"

        def createSchema(SchemaPath, dataToBeWritten):
            try:
                #write LUIS or RASA JSON Schema in json file locking the file to avoid race condition using Python's Windows msvcrt binaries
                with open(SchemaPath, "w") as SchemaCreationHandle:
                    msvcrt.locking(SchemaCreationHandle.fileno(), msvcrt.LK_LOCK, os.path.getsize(SchemaPath))
                    json.dump(dataToBeWritten, SchemaCreationHandle, indent = 4, sort_keys=False)
                    SchemaCreationHandle.close()

                #Check if written file actually exists on disk or not
                doesFileExist = os.path.exists(SchemaPath)
                return doesFileExist
            except Exception as ex:
                return str(ex.args)

        def appendTimeStampToModel(ModelName):
            return ModelName + '_{:%Y%m%d-%H%M%S}.json'.format(datetime.datetime.now())

        def appendTimeStampToConfigSpacy(ModelName):
            return ModelName + '_config_spacy_{:%Y%m%d-%H%M%S}.json'.format(datetime.datetime.now())

        def createConfigSpacy(ModelName, DataPath, ConfigSpacyPath, TrainedModelsPath, LogDataPath):
            try:
                with open(ConfigSpacyPath, "w") as configSpacyFileHandle:
                    msvcrt.locking(configSpacyFileHandle.fileno(), msvcrt.LK_LOCK, os.path.getsize(ConfigSpacyPath))
                    configDataToBeWritten = dict({
                        "project": ModelName,
                        "data": DataPath,
                        "path": TrainedModelsPath,
                        "response_log": LogDataPath,
                        "log_level": "INFO",
                        "max_training_processes": 1,
                        "pipeline": "spacy_sklearn",
                        "language": "en",
                        "emulate": "luis",
                        "cors_origins": ["*"],
                        "aws_endpoint_url": None,
                        "token": None,
                        "num_threads": 2,
                        "port": 5000
                    })
                    json.dump(configDataToBeWritten, configSpacyFileHandle, indent = 4, sort_keys=False)
                return os.path.getsize(ConfigSpacyPath) > 0
            except Exception as ex:
                return str(ex.args)

        def TrainRASA(configFilePath):
            try:
                trainingString = 'start /wait python -m rasa_nlu.train -c ' + '\"' + os.path.normpath(configFilePath) + '\"'
                returnCode = subprocess.call(trainingString, shell = True)
                return returnCode
            except Exception as ex:
                return str(ex.args)

        def StartRASAServer(configFileDirectory, ModelName):
            #region Server starting logic
            try:
                global hasAPIStarted
                global serverExecutablePID
                #1) for finding which is the most recent config_spacy
                root, dirs, files = next(os.walk(os.path.normpath(configFileDirectory)))
                configFiles = [configFile for configFile in files if ModelName in configFile]
                configFiles.sort(key = str.lower, reverse = True)
                mostRecentConfigSpacy = os.path.join(configFileDirectory, configFiles[0])

                serverStartingString = 'start /wait python -m rasa_nlu.server -c ' + '\"' + os.path.normpath(mostRecentConfigSpacy) + '\"'
                serverProcess = subprocess.Popen(serverStartingString, shell = True)
                serverExecutablePID = serverProcess.pid

                pingReturnCode = 1
                while(pingReturnCode):
                    pingReturnCode = os.system("netstat -na | findstr /i 5000")
                if(pingReturnCode == 0):
                    hasAPIStarted = True

                return pingReturnCode
            except Exception as ex:
                return jsonify({"message": "Failed because: " + str(ex.args), "success": False})
            #endregion

        def KillProcessWindow(hasAPIStarted, serverExecutablePID):
            if(hasAPIStarted == True and serverExecutablePID != 0):
                me = psutil.Process(serverExecutablePID)
                for child in me.children():
                    child.kill()

        @app.route('/api/TrainRASA', methods = ['POST'])
        def TrainRASAServer():
            try:
                #get request body of POST request
                postedJSONData = json.loads(request.data, strict = False)
                if postedJSONData["data"] is not None:
                    print("Valid data")
                    #region JSON file building logic
                    modelName = postedJSONData["modelName"]
                    modelNameWithExtension = appendTimeStampToModel(modelName)
                    schemaPath = os.path.join(assetsDirectory, "data", modelNameWithExtension)
                    print(createSchema(schemaPath, postedJSONData["data"]))
                    #endregion

                    #region config file creation logic
                    configFilePath = os.path.join(assetsDirectory, "config", appendTimeStampToConfigSpacy(modelName))
                    logsDirectory = os.path.join(assetsDirectory, "logs")
                    trainedModelDirectory = os.path.join(assetsDirectory, "models")
                    configFileCreated = createConfigSpacy(modelName, schemaPath, configFilePath, trainedModelDirectory, logsDirectory)
                    #endregion

                    if(configFileCreated == True):
                        #region Training RASA NLU with schema
                        TrainingReturnCode = TrainRASA(configFilePath)
                        #endregion
                        if(TrainingReturnCode == 0):
                            return jsonify({"message": "Successfully trained RASA NLU with modelname: " + modelName, "success": True})
                        # KillProcessWindow(hasAPIStarted, serverExecutablePID)
                        # serverStartingReturnCode = StartRASAServer(configFileDirectory, modelName)
                        # #endregion
                        # if serverStartingReturnCode == 0:
                        #     return jsonify({"message": "Successfully started RASA server on port 5000", "success": True})
                        # elif serverStartingReturnCode is None:
                        #     return jsonify({"message": "Could not start RASA server, request timed out", "success": False})
                        else:
                            return jsonify({"message": "Something wrong happened while training RASA NLU!", "success": False})
                    else:
                        return jsonify({"message": "Could not create config file for RASA NLU", "success": False})
                #throw exception if request body is empty
                return jsonify({"message": "Please enter some JSON, JSON seems to be empty", "success": False})
            except Exception as ex:
                return jsonify({"Reason": "Failed because" + str(ex.args), "success": False})

        @app.route('/api/StopRASAServer', methods = ['GET'])
        def StopRASAServer():
            try:
                global serverExecutablePID
                if(serverExecutablePID != 0 and serverExecutablePID is not None):
                    me = psutil.Process(serverExecutablePID)
                    for child in me.children():
                        child.kill()
                return jsonify({"message": "Server stopped....", "success": True})
            except Exception as ex:
                return jsonify({"message": "Something went wrong while shutting down the server because: " + str(ex.args), "success": True})

if __name__ == "__main__":
    StartRASAServer(configFileDirectory, chitChatModel)
    app.run(debug=False, threaded = True, host='0.0.0.0', port = 5050)
There is a simple way of doing it, but due to the poor code documentation of RASA it is difficult to find.
You will have to create a json in the following format.
training_data = {'rasa_nlu_data': {"common_examples": training_examples,
                                   "regex_features": [],
                                   "lookup_tables": [],
                                   "entity_synonyms": []
                                   }}
In this JSON training_examples is a list and it should contain the data as represented below.
training_examples = [
    {
        "intent": "greet",
        "text": "Hello"
    },
    {
        "intent": "greet",
        "text": "Hi, how are you ?"
    },
    {
        "intent": "sad",
        "text": "I am not happy with the service"
    },
    {
        "intent": "praise",
        "text": "You're a genius"
    }
]
with this now, you can train it like this :)
from rasa.nlu import config
from rasa.nlu.model import Trainer  # needed for Trainer below

# Even the config can be loaded from a dict like this
def get_train_config():
    return {'language': 'en',
            'pipeline': [
                {'name': 'WhitespaceTokenizer'},
                {'name': 'ConveRTFeaturizer'},
                {'name': 'EmbeddingIntentClassifier'}
            ],
            'data': None,
            'policies': [
                {'name': 'MemoizationPolicy'},
                {'name': 'KerasPolicy'},
                {'name': 'MappingPolicy'}
            ]}

trainer = Trainer(config._load_from_dict(get_train_config()))
interpreter = trainer.train(data)
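One hedged way to build the data object that trainer.train expects from the training_data dict above is rasa's own JSON reader (assuming rasa 1.x; check your installed version's API):

from rasa.nlu.training_data.formats.rasa import RasaReader

# parse the in-memory dict into a TrainingData object
data = RasaReader().read_from_json(training_data)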

Can anyone give a snapshot example of elastic-search by using python?

I'm using Python to access an Elasticsearch cluster. Now I want to back up my index by using a snapshot.
The most difficult thing is that the python-elasticsearch docs just give me an API description; there is no example showing me how to create a snapshot. I tried some parameters, but failed. Can anyone give a snapshot example of Elasticsearch using Python? The following is my code:
from elasticsearch import Elasticsearch

es = Elasticsearch()

snapshot_body = {
    "type": "url",
    "settings": {
        "url": "http://download.elasticsearch.org/definitiveguide/sigterms_demo/"
    }
}
body = {"snapshot": snapshot_body}
es.snapshot.create_repository(repository='test', body=body)
Your repository creation is almost correct, you don't need the line body = {"snapshot": snapshot_body}, simply create your repository like this:
es.snapshot.create_repository(repository='test', body=snapshot_body)
Now in order to create a snapshot, all you have to do is this:
es.snapshot.create(repository='test', snapshot='my_snapshot')
If you want to store only a few indices and not all you can also provide a body like this:
index_body = {
    "indices": "index_1,index_2"
}
es.snapshot.create(repository='test', snapshot='my_snapshot', body=index_body)
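By default the create call returns as soon as the snapshot is initialized and the snapshot keeps running in the background. If you want the call to block until the snapshot finishes, the client also accepts the wait_for_completion flag, and you can inspect the result afterwards (a sketch, using the same repository and snapshot names as above):

es.snapshot.create(repository='test', snapshot='my_snapshot', wait_for_completion=True)
es.snapshot.get(repository='test', snapshot='my_snapshot')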
Save the following sample Python code as a Python file, such as register-repo.py. The client requires the AWS SDK for Python (Boto3), requests and requests-aws4auth packages. The client contains commented-out examples for other snapshot operations.
import boto3
import requests
from requests_aws4auth import AWS4Auth
host = '' # include https:// and trailing /
region = '' # e.g. us-west-1
service = 'es'
credentials = boto3.Session().get_credentials()
awsauth = AWS4Auth(credentials.access_key, credentials.secret_key, region, service, session_token=credentials.token)
# Register repository
path = '_snapshot/my-snapshot-repo-name' # the Elasticsearch API endpoint
url = host + path
payload = {
    "type": "s3",
    "settings": {
        "bucket": "s3-bucket-name",
        # "endpoint": "s3.amazonaws.com", # for us-east-1
        "region": "us-west-1", # for all other regions
        "role_arn": "arn:aws:iam::123456789012:role/TheSnapshotRole"
    }
}
headers = {"Content-Type": "application/json"}
r = requests.put(url, auth=awsauth, json=payload, headers=headers)
print(r.status_code)
print(r.text)
# # Take snapshot
#
# path = '_snapshot/my-snapshot-repo/my-snapshot'
# url = host + path
#
# r = requests.put(url, auth=awsauth)
#
# print(r.text)
#
# # Delete index
#
# path = 'my-index'
# url = host + path
#
# r = requests.delete(url, auth=awsauth)
#
# print(r.text)
#
# # Restore snapshot (all indices except Kibana and fine-grained access control)
#
# path = '_snapshot/my-snapshot-repo/my-snapshot/_restore'
# url = host + path
#
# payload = {
#     "indices": "-.kibana*,-.opendistro_security",
#     "include_global_state": False  # note: Python False, not JSON false
# }
#
# headers = {"Content-Type": "application/json"}
#
# r = requests.post(url, auth=awsauth, json=payload, headers=headers)
#
# # Restore snapshot (one index)
#
# path = '_snapshot/my-snapshot-repo/my-snapshot/_restore'
# url = host + path
#
# payload = {"indices": "my-index"}
#
# headers = {"Content-Type": "application/json"}
#
# r = requests.post(url, auth=awsauth, json=payload, headers=headers)
#
# print(r.text)
Important: don't use the "region" setting in us-east-1.
If the S3 bucket is in the us-east-1 region, you must use "endpoint": "s3.amazonaws.com" instead of "region": "us-east-1".
To enable server-side encryption with S3-managed keys for the snapshot repository, add "server_side_encryption": true to the "settings" JSON.
https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-managedomains-snapshots.html#es-managedomains-snapshot-registerdirectory
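Putting both notes together, a repository payload for a us-east-1 bucket with S3-managed encryption might look like this (bucket name and role ARN are the placeholders from the sample above):

payload = {
    "type": "s3",
    "settings": {
        "bucket": "s3-bucket-name",
        "endpoint": "s3.amazonaws.com",  # us-east-1: endpoint instead of region
        "server_side_encryption": True,  # S3-managed keys
        "role_arn": "arn:aws:iam::123456789012:role/TheSnapshotRole"
    }
}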
