Frida - hook native method failure on Android Q - Python

I have a sample app which has an int add(int a, int b) function in a native library.
I use the code below to hook the add method:
#!/usr/bin/env python3
import frida
import sys

package_name = "com.sample.hello"
apiname = "add"

def get_messages_from_js(message, data):
    if message['type'] == 'send':
        print(message['payload'])
    else:
        print(message)

def instrument_debugger_checks():
    hook_code = """
    Interceptor.attach(Module.findExportByName(null, "%s"), {
        onEnter: function(args) {
            console.log("onEnter...");
            //send(Memory.readUtf8String(args[1]));
        },
        onLeave: function(args) {
            console.log("onLeave...");
        }
    });
    """ % (apiname)
    return hook_code

process = frida.get_usb_device().attach(package_name)
script = process.create_script(instrument_debugger_checks())
script.on('message', get_messages_from_js)
script.load()
sys.stdin.read()
I use the command below to get the function name from the .so:
$ nm -D libnative2.so |grep add
0000000000082504 T _ZNSt6__ndk114__shared_count12__add_sharedEv
0000000000082574 T _ZNSt6__ndk119__shared_weak_count10__add_weakEv
000000000008255c T _ZNSt6__ndk119__shared_weak_count12__add_sharedEv
0000000000042d8c T add
I have tried all of these names and the result is the same.
When I run the script, I get the error below:
{'type': 'error', 'description': 'Error: expected a pointer', 'stack': 'Error: expected a pointer\n at frida/runtime/core.js:387\n at /script1.js:9', 'fileName': 'frida/runtime/core.js', 'lineNumber': 387, 'columnNumber': 1}
What's wrong with my code?

It looks like you have a timing issue: when your script runs, libnative2.so has most likely not been loaded yet, so Module.findExportByName() returns null and Interceptor.attach() fails with "expected a pointer".
Try the following Frida script instead. It hooks System.loadLibrary() and installs the native hook only after the library has actually been loaded:
Java.perform(function() {
    const System = Java.use("java.lang.System");
    const Runtime = Java.use("java.lang.Runtime");
    const SystemLoad_2 = System.loadLibrary.overload("java.lang.String");
    const VMStack = Java.use("dalvik.system.VMStack");
    SystemLoad_2.implementation = function(library) {
        console.log("Loading dynamic library => " + library);
        try {
            const loaded = Runtime.getRuntime().loadLibrary0(VMStack.getCallingClassLoader(), library);
            if (library.includes("native2")) {
                // the library is loaded now, so install your hook here
                Interceptor.attach(Module.findExportByName("libnative2.so", "add"), {
                    onEnter: function(args) {
                        console.log("onEnter...");
                        //send(Memory.readUtf8String(args[1]));
                    },
                    onLeave: function(args) {
                        console.log("onLeave...");
                    }
                });
            }
            return loaded;
        } catch (ex) {
            console.log(ex);
        }
    };
});
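Note that this only works if the script is injected before the app loads libnative2.so. One way to guarantee that is to spawn the app from the Python side instead of attaching to an already-running process. A minimal sketch (untested against your app, and assuming hook_code holds the Java.perform(...) script above):

#!/usr/bin/env python3
# Sketch only: spawn the app suspended, load the script, then resume it,
# so the System.loadLibrary hook is in place before libnative2.so is loaded.
import frida
import sys

package_name = "com.sample.hello"
hook_code = """ ...the Java.perform(...) script above... """

def get_messages_from_js(message, data):
    print(message['payload'] if message['type'] == 'send' else message)

device = frida.get_usb_device()
pid = device.spawn([package_name])    # start the app in a suspended state
session = device.attach(pid)
script = session.create_script(hook_code)
script.on('message', get_messages_from_js)
script.load()
device.resume(pid)                    # resume only after the script is loaded
sys.stdin.read()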

Related

How to change a ROS package to subscribe to a generic IMU message instead of a custom one?

I have created a ROS package that uses custom IMU messages. I would like to change the implementation to use sensor_msgs/msg/Imu instead of the custom message. So first I found the nodes inside the package that subscribe to that custom IMU message. This is that node:
#!/usr/bin/python3
import numpy as np
from node_registry.decorators import rosnode, register
from driver_ros_msgs.msg import GImu as Imu
from deepx_ros_common.coordinate_converter import quaternion2rpy

_sub_topic = "/backhoe/imu"
_qos = 1


@rosnode.parameter("period", 0.025)
@rosnode
def node_name():
    return "test_imu"


@rosnode.inject
def yaw_deg():
    return 0


@rosnode.subscribe(Imu, _sub_topic, _qos)
def subscrbe_cb(msg):
    orientation = msg.imu.orientation
    ypr_rad = quaternion2rpy(
        [orientation.w, orientation.x, orientation.y, orientation.z])[0]
    rosnode.logger.info(f"Output ypr: {np.rad2deg(ypr_rad)}")


register()
So is it enough to just change the line _sub_topic = "/backhoe/imu" to _sub_topic = "/sensor_msgs/msg/imu", or is something more needed in this node? I also have IMU driver C++ code like this:
#include <tf2/LinearMath/Quaternion.h>

#include <string>
#include <memory>

#include "vectornav_driver/imu_driver.hpp"

namespace vectornav_driver
{
ImuDriver::ImuDriver(
  const std::string node_name, const rclcpp::NodeOptions & options)
: Node(node_name, options),
  header_('$'),
  delim_("\r\n"),
  status_start_(26),
  status_end_(30),
  yaw_start_(31),
  yaw_end_(39),
  pitch_start_(40),
  pitch_end_(48),
  roll_start_(49),
  roll_end_(57)
{
}

ImuDriver::~ImuDriver()
{
}

void ImuDriver::init()
{
  rclcpp::Parameter frame_id;
  rclcpp::Parameter ip;
  rclcpp::Parameter port;
  auto flag_frame_id = get_parameter_or(
    "frame_id", frame_id,
    rclcpp::Parameter("frame_id", "default_link"));
  if (!flag_frame_id) {
    RCLCPP_WARN_ONCE(
      get_logger(),
      "Could not get frame_id, setting: %s",
      frame_id.as_string().c_str());
  }
  frame_id_ = frame_id.as_string();
  auto flag_ip = get_parameter_or(
    "ip", ip,
    rclcpp::Parameter("ip", "192.168.255.1"));
  if (!flag_ip) {
    RCLCPP_WARN_ONCE(
      get_logger(),
      "Could not get ip, setting: %s",
      ip.as_string().c_str());
  }
  auto flag_port = get_parameter_or(
    "port", port,
    rclcpp::Parameter("port", 10003));
  if (!flag_port) {
    RCLCPP_WARN_ONCE(
      get_logger(),
      "Could not get port, setting: %d",
      port.as_int());
  }
  auto hardware_id = ip.as_string() + ":" + std::to_string(port.as_int());
  rclcpp::Parameter buffer_limit;
  auto flag_buffer_limit = get_parameter_or(
    "buffer_limit", buffer_limit,
    rclcpp::Parameter("buffer_limit", 143));
  if (!flag_buffer_limit) {
    RCLCPP_WARN_ONCE(
      get_logger(),
      "Could not get buffer_limit, setting: %d",
      buffer_limit.as_int());
  }
  buffer_limit_ = buffer_limit.as_int();
  rclcpp::Parameter diagnostics_enable;
  auto flag_diag_enable = get_parameter_or(
    "diagnostics.enable",
    diagnostics_enable,
    rclcpp::Parameter("diagnostics.enable", false));
  if (!flag_diag_enable) {
    RCLCPP_WARN_ONCE(
      get_logger(),
      "Could not get diagnostics.enable, setting: %d",
      diagnostics_enable.as_bool());
  }
  rclcpp::Parameter min_freq;
  auto flag_min_freq = get_parameter_or(
    "diagnostics.min_freq",
    min_freq,
    rclcpp::Parameter("diagnostics.min_freq", 40.0));
  if (!flag_min_freq) {
    RCLCPP_WARN_ONCE(
      get_logger(),
      "Could not get min_freq, setting: %f",
      min_freq.as_double());
  }
  rclcpp::Parameter max_freq;
  auto flag_max_freq = get_parameter_or(
    "diagnostics.max_freq",
    max_freq,
    rclcpp::Parameter("diagnostics.max_freq", 45.0));
  if (!flag_max_freq) {
    RCLCPP_WARN_ONCE(
      get_logger(),
      "Could not get max_freq, setting: %f",
      max_freq.as_double());
  }
  rclcpp::Parameter period;
  auto flag_period = get_parameter_or(
    "diagnostics.period",
    period,
    rclcpp::Parameter("diagnostics.period", 1.0));
  if (!flag_period) {
    RCLCPP_WARN_ONCE(
      get_logger(),
      "Could not get period, setting: %f",
      period.as_double());
  }
  if (diagnostics_enable.as_bool()) {
    min_freq_ = min_freq.as_double();
    max_freq_ = max_freq.as_double();
    auto freq_param = diagnostic_updater::FrequencyStatusParam(
      &min_freq_,
      &max_freq_);
    freq_status_ = std::make_shared<
      diagnostic_updater::FrequencyStatus>(freq_param);
    sensor_status_ = std::make_shared<
      SensorStatus>();
    updater_ = std::make_unique<diagnostic_updater::Updater>(
      this, period.as_double());
    updater_->add(*freq_status_);
    updater_->add(*sensor_status_);
    updater_->setHardwareID(hardware_id);
  }
  rclcpp::Parameter is_unittest;
  auto flag_is_unittest = get_parameter_or(
    "is_unittest",
    is_unittest,
    rclcpp::Parameter("is_unittest", false));
  if (!flag_is_unittest) {
    RCLCPP_WARN_ONCE(
      get_logger(),
      "Could not get is_unittest, setting: %d",
      is_unittest.as_bool());
  }
  tcp_ = std::make_unique<driver_common_utils::TcpIO>(
    ip.as_string(), port.as_int());
  if (is_unittest.as_bool()) {
    rclcpp::Rate rate(5.0);
    rate.sleep();
  }
  auto flag_init = tcp_->init();
  if (flag_init) {
    rclcpp::QoS qos(rclcpp::KeepLast(1));
    pub_imu_ = create_publisher<GImu>("imu", qos);
    boost::asio::async_read_until(
      tcp_->getSocket(),
      boost::asio::dynamic_buffer(buffer_),
      delim_,
      boost::bind(
        &ImuDriver::receiveHandler,
        this,
        boost::asio::placeholders::error,
        boost::asio::placeholders::bytes_transferred));
    tcp_->run();
  } else {
    RCLCPP_ERROR_ONCE(get_logger(), "Socket init error");
  }
}

void ImuDriver::receiveHandler(
  const boost::system::error_code & error, std::size_t bytes_transferred)
{
  if (rclcpp::ok()) {
    if ((!error.failed() || error == boost::asio::error::message_size)) {
      if (buffer_.size() > buffer_limit_) {
        buffer_.clear();
      }
      if (bytes_transferred && buffer_.size()) {
        freq_status_->tick();
        auto buffer = buffer_.substr(0, bytes_transferred);
        auto get_header = buffer.at(0);
        if (get_header == header_) {
          auto status = decode<uint16_t>(
            buffer, status_start_, status_end_, true);
          auto yaw = toRadian(
            decode<float>(
              buffer, yaw_start_, yaw_end_));
          auto pitch = toRadian(
            decode<float>(
              buffer, pitch_start_, pitch_end_));
          auto roll = toRadian(
            decode<float>(
              buffer, roll_start_, roll_end_));
          tf2::Quaternion quaternion;
          quaternion.setRPY(roll, pitch, yaw);
          GImu gimu;
          gimu.header.frame_id = frame_id_;
          gimu.header.stamp = get_clock()->now();
          auto mode_bit_1 = boolBitValue(status, 0);
          auto mode_bit_2 = boolBitValue(status, 1);
          gimu.mode = (mode_bit_1 * 1) + (mode_bit_2 * 2);
          gimu.gnss_fix = boolBitValue(status, 2);
          gimu.imu_error = boolBitValue(status, 4);
          gimu.magnetometer_pressure_error = boolBitValue(status, 5);
          gimu.gnss_error = boolBitValue(status, 6);
          gimu.gnss_heading_ins = boolBitValue(status, 8);
          gimu.gnss_compass = boolBitValue(status, 9);
          gimu.imu.orientation.w = quaternion.getW();
          gimu.imu.orientation.x = quaternion.getX();
          gimu.imu.orientation.y = quaternion.getY();
          gimu.imu.orientation.z = quaternion.getZ();
          sensor_status_->sensorStatus(
            gimu.mode,
            gimu.gnss_fix,
            gimu.imu_error,
            gimu.magnetometer_pressure_error,
            gimu.gnss_error);
          pub_imu_->publish(gimu);
        }
        buffer_.erase(0, bytes_transferred);
      }
      boost::asio::async_read_until(
        tcp_->getSocket(),
        boost::asio::dynamic_buffer(buffer_),
        delim_,
        boost::bind(
          &ImuDriver::receiveHandler,
          this,
          boost::asio::placeholders::error,
          boost::asio::placeholders::bytes_transferred));
    } else {
      RCLCPP_ERROR(
        get_logger(),
        "Receive handler error: %s", error.message().c_str());
    }
  } else {
    RCLCPP_INFO(get_logger(), "Receive handler: node shutdown called");
  }
}
}  // namespace vectornav_driver
So should any changes be made in the driver too?
Thanks

Unhandled Rejection (TypeError): Cannot read property 'find' of undefined for react-redux

Please help me fix this error: Unhandled Rejection (TypeError): Cannot read property 'find' of undefined.
All of the code is below. This is the cart reducer:
import { CART_ADD_ITEM } from '../constants/cartConstants'

export const cartReducer = (state = { cartItems: [] }, action) => {
    switch (action.type) {
        case CART_ADD_ITEM:
            const item = action.payload
            const existItem = state.cartItems.find(x => x.product === item.product)
            if (existItem) {
                return {
                    ...state,
                    cartItems: state.cartItems.map(x =>
                        x.product === existItem.product ? item : x)
                }
            } else {
                return {
                    ...state,
                    cartItems: [...state.cartItems, item]
                }
            }
        default:
            return state
    }
}
This is the cart action:
import axios from 'axios'
import { CART_ADD_ITEM } from '../constants/cartConstants'

export const addToCart = (id, qty) => async (dispatch, getState) => {
    const { data } = await axios.get(`/api/products/${id}`)
    dispatch({
        type: CART_ADD_ITEM,
        payload: {
            product: data._id,
            name: data.name,
            image: data.image,
            price: data.price,
            countInStock: data.countInStock,
            qty
        }
    })
    localStorage.setItem('cartItems', JSON.stringify(getState().cart.cartItems))
}
And this is the store:
import { createStore, combineReducers, applyMiddleware } from 'redux'
import thunk from 'redux-thunk'
import { composeWithDevTools } from 'redux-devtools-extension'
import { listProducts, listProductDetails } from './reducers/productReducers'
import { cartReducer } from './reducers/cartReducers'

const cartFromLocalStorage = localStorage.getItem('cartITems') ? JSON.parse(localStorage.getItem('cartItems')) : []

const initialState = {
    cart: { cartITems: cartFromLocalStorage }
}

const reducer = combineReducers({
    productsList: listProducts,
    productDetails: listProductDetails,
    cart: cartReducer,
})

const middleware = [thunk]

const store = createStore(
    reducer,
    initialState,
    composeWithDevTools(applyMiddleware(...middleware))
)

export default store
I found the same error while passing cartFromLocalStorage in the initialState: I had made a mistake by declaring cart: { cartITems: cartFromLocalStorage } instead of cart: { cartItems: cartFromLocalStorage } (a capital T in cartItems). I corrected it, and now add to cart is working fine.
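For reference, the corrected store lines would look roughly like this (both the localStorage key passed to getItem and the key inside cart spelled cartItems):
// store.js - corrected key spelling (sketch of the fix described above)
const cartFromLocalStorage = localStorage.getItem('cartItems')
    ? JSON.parse(localStorage.getItem('cartItems'))
    : []

const initialState = {
    cart: { cartItems: cartFromLocalStorage },
}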

Use a TensorFlow model in Android

I have a TensorFlow model and I have converted it to ".tflite", but I don't know how to implement it on Android. I followed the TensorFlow guidelines for implementing it on Android, but since there is no XML code given on the TensorFlow website, I am struggling to connect it with the front end (XML). I need a clear explanation of how to use my model in Android Studio using Java.
I followed the official instructions given on the TensorFlow website to implement the model on Android.
Here is sample code showing how to implement object detection based on a .tflite model from TensorFlow. I suppose these kinds of answers are not the best answers, but I happen to have a simple example of your exact problem.
Note: it does detect objects and outputs their labels to the log using Log.d, but no boxes or labels will be drawn around the detected objects.
Download the starter model and labels from here. Put them into the assets folder of your project.
Java
import android.content.pm.PackageManager;
import android.media.Image;
import android.os.Bundle;
import android.util.Log;
import android.widget.Toast;
import androidx.annotation.NonNull;
import androidx.annotation.Nullable;
import androidx.appcompat.app.AppCompatActivity;
import androidx.camera.core.Camera;
import androidx.camera.core.CameraSelector;
import androidx.camera.core.ExperimentalGetImage;
import androidx.camera.core.ImageAnalysis;
import androidx.camera.core.ImageProxy;
import androidx.camera.core.Preview;
import androidx.camera.lifecycle.ProcessCameraProvider;
import androidx.camera.view.PreviewView;
import androidx.core.app.ActivityCompat;
import androidx.core.content.ContextCompat;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.mlkit.common.model.LocalModel;
import com.google.mlkit.vision.common.InputImage;
import com.google.mlkit.vision.objects.DetectedObject;
import com.google.mlkit.vision.objects.ObjectDetection;
import com.google.mlkit.vision.objects.ObjectDetector;
import com.google.mlkit.vision.objects.custom.CustomObjectDetectorOptions;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.NoSuchElementException;
import java.util.concurrent.ExecutionException;

public class ActivityExample extends AppCompatActivity {

    private ListenableFuture<ProcessCameraProvider> cameraProviderFuture;
    private ObjectDetector objectDetector;
    private PreviewView prevView;
    private List<String> labels;

    private int REQUEST_CODE_PERMISSIONS = 101;
    private String[] REQUIRED_PERMISSIONS =
            new String[]{"android.permission.CAMERA"};

    @Override
    protected void onCreate(@Nullable Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_fullscreen);

        prevView = findViewById(R.id.viewFinder);

        prepareObjectDetector();
        prepareLabels();

        if (allPermissionsGranted()) {
            startCamera();
        } else {
            ActivityCompat.requestPermissions(this, REQUIRED_PERMISSIONS, REQUEST_CODE_PERMISSIONS);
        }
    }

    private void prepareLabels() {
        try {
            InputStreamReader reader = new InputStreamReader(getAssets().open("labels_mobilenet_quant_v1_224.txt"));
            labels = readLines(reader);
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    private List<String> readLines(InputStreamReader reader) {
        BufferedReader bufferedReader = new BufferedReader(reader, 8 * 1024);
        Iterator<String> iterator = new LinesSequence(bufferedReader);
        ArrayList<String> list = new ArrayList<>();
        while (iterator.hasNext()) {
            list.add(iterator.next());
        }
        return list;
    }

    private void prepareObjectDetector() {
        CustomObjectDetectorOptions options = new CustomObjectDetectorOptions.Builder(loadModel("mobilenet_v1_1.0_224_quant.tflite"))
                .setDetectorMode(CustomObjectDetectorOptions.SINGLE_IMAGE_MODE)
                .enableMultipleObjects()
                .enableClassification()
                .setClassificationConfidenceThreshold(0.5f)
                .setMaxPerObjectLabelCount(3)
                .build();
        objectDetector = ObjectDetection.getClient(options);
    }

    private LocalModel loadModel(String assetFileName) {
        return new LocalModel.Builder()
                .setAssetFilePath(assetFileName)
                .build();
    }

    private void startCamera() {
        cameraProviderFuture = ProcessCameraProvider.getInstance(this);
        cameraProviderFuture.addListener(() -> {
            try {
                ProcessCameraProvider cameraProvider = cameraProviderFuture.get();
                bindPreview(cameraProvider);
            } catch (ExecutionException e) {
                // No errors need to be handled for this Future.
                // This should never be reached.
            } catch (InterruptedException e) {
            }
        }, ContextCompat.getMainExecutor(this));
    }

    private void bindPreview(ProcessCameraProvider cameraProvider) {
        Preview preview = new Preview.Builder().build();

        CameraSelector cameraSelector = new CameraSelector.Builder()
                .requireLensFacing(CameraSelector.LENS_FACING_BACK)
                .build();

        ImageAnalysis imageAnalysis = new ImageAnalysis.Builder()
                .setBackpressureStrategy(ImageAnalysis.STRATEGY_KEEP_ONLY_LATEST)
                .build();

        YourAnalyzer yourAnalyzer = new YourAnalyzer();
        yourAnalyzer.setObjectDetector(objectDetector, labels);
        imageAnalysis.setAnalyzer(
                ContextCompat.getMainExecutor(this),
                yourAnalyzer);

        Camera camera =
                cameraProvider.bindToLifecycle(
                        this,
                        cameraSelector,
                        preview,
                        imageAnalysis
                );

        preview.setSurfaceProvider(prevView.createSurfaceProvider(camera.getCameraInfo()));
    }

    private Boolean allPermissionsGranted() {
        for (String permission : REQUIRED_PERMISSIONS) {
            if (ContextCompat.checkSelfPermission(
                    this,
                    permission
            ) != PackageManager.PERMISSION_GRANTED
            ) {
                return false;
            }
        }
        return true;
    }

    @Override
    public void onRequestPermissionsResult(int requestCode, @NonNull String[] permissions, @NonNull int[] grantResults) {
        if (requestCode == REQUEST_CODE_PERMISSIONS) {
            if (allPermissionsGranted()) {
                startCamera();
            } else {
                Toast.makeText(this, "Permissions not granted by the user.", Toast.LENGTH_SHORT)
                        .show();
                finish();
            }
        }
    }

    private static class YourAnalyzer implements ImageAnalysis.Analyzer {

        private ObjectDetector objectDetector;
        private List<String> labels;

        public void setObjectDetector(ObjectDetector objectDetector, List<String> labels) {
            this.objectDetector = objectDetector;
            this.labels = labels;
        }

        @Override
        @ExperimentalGetImage
        public void analyze(@NonNull ImageProxy imageProxy) {
            Image mediaImage = imageProxy.getImage();
            if (mediaImage != null) {
                InputImage image = InputImage.fromMediaImage(
                        mediaImage,
                        imageProxy.getImageInfo().getRotationDegrees()
                );
                objectDetector
                        .process(image)
                        .addOnFailureListener(e -> imageProxy.close())
                        .addOnSuccessListener(detectedObjects -> {
                            // list of detectedObjects has all the information you need
                            StringBuilder builder = new StringBuilder();
                            for (DetectedObject detectedObject : detectedObjects) {
                                for (DetectedObject.Label label : detectedObject.getLabels()) {
                                    builder.append(labels.get(label.getIndex()));
                                    builder.append("\n");
                                }
                            }
                            Log.d("OBJECTS DETECTED", builder.toString().trim());
                            imageProxy.close();
                        });
            }
        }
    }

    static class LinesSequence implements Iterator<String> {
        private BufferedReader reader;
        private String nextValue;
        private Boolean done = false;

        public LinesSequence(BufferedReader reader) {
            this.reader = reader;
        }

        @Override
        public boolean hasNext() {
            if (nextValue == null && !done) {
                try {
                    nextValue = reader.readLine();
                } catch (IOException e) {
                    e.printStackTrace();
                    nextValue = null;
                }
                if (nextValue == null) done = true;
            }
            return nextValue != null;
        }

        @Override
        public String next() {
            if (!hasNext()) {
                throw new NoSuchElementException();
            }
            String answer = nextValue;
            nextValue = null;
            return answer;
        }
    }
}
XML layout
<?xml version="1.0" encoding="utf-8"?>
<androidx.camera.view.PreviewView
    xmlns:android="http://schemas.android.com/apk/res/android"
    android:id="@+id/viewFinder"
    android:layout_width="match_parent"
    android:layout_height="match_parent" />
Gradle file configuration
android {
    ...
    aaptOptions {
        noCompress "tflite"  // Your model's file extension: "tflite", "lite", etc.
    }

    compileOptions {
        sourceCompatibility JavaVersion.VERSION_1_8
        targetCompatibility JavaVersion.VERSION_1_8
    }
}

dependencies {
    ...
    implementation 'com.google.mlkit:object-detection-custom:16.0.0'

    def camerax_version = "1.0.0-beta03"
    // CameraX core library using camera2 implementation
    implementation "androidx.camera:camera-camera2:$camerax_version"
    // CameraX Lifecycle Library
    implementation "androidx.camera:camera-lifecycle:$camerax_version"
    // CameraX View class
    implementation "androidx.camera:camera-view:1.0.0-alpha10"
}

Uncaught TypeError: Super expression must either be null or a function [duplicate]

This question already has answers here:
Reactjs, Super expression must either be null or a function
(2 answers)
Closed 5 years ago.
So I'm new to React, and even though I've found multiple others having the same issue, I still haven't found the error in my code. Therefore I turn to you, Stack Overflow; you're my only hope!
I am learning, so I wanted to create a simple ReactJS application that handles an HTTP request. After finishing the code I encountered this error:
Uncaught TypeError: Super expression must either be null or a function, not object
at exports.default (inherits.js?0578:21)
at eval (app.js?71f7:22)
The error persists even though I've tried a lot of different changes. I am fairly certain that it's related to imports/exports, as that is what a lot of other sources suggest, although double-checking the imports hasn't yielded any results.
The code:
app.js (handles the rendering of a simple button and should execute a simple GET request on click)
import React, { Component } from 'react'
import { connect } from 'react-redux'
import { createServerSagaRequest } from '../saga/serverSaga'
import { incrRequestAmount, requestSelector } from '../reducer/requestReducer'

const mapStateToProps = (state) => {
    return {
        getRequestAmount: requestSelector.requests(state),
    }
}

const mapDispatchToProps = (dispatch) => {
    return {
        open: (url, data, action, method) => dispatch(createServerSagaRequest((url, data, action, method))),
        requests: () => dispatch(incrRequestAmount()),
    }
}

class App extends React {
    constructor(props){
        super(props)
    }

    _buttonClick() {
        this.props.requests()
        this.props.open("http://mvctestproject.local/GetData", "TestDataFraGet", action, "GET")
    }

    render(){
        return (
            <button
                className="btn btn-default"
                onClick={this._buttonClick()}>{this.props.getRequestAmount()}
            </button>
        )
    }
}

export default connect(mapStateToProps, mapDispatchToProps)(App)
serverSaga.js (my saga which can access the reducer and service)
import React, { Component } from 'react'
import { put, call, take, fork, select } from 'redux-saga/effects'
import { callServer } from '../service/serverService'
import { incrRequestAmount, requestSelector } from '../reducer/requestReducer'

export function createServerSagaRequest() { return { type: CREATE_REQUEST } }

function* handleRequest(url, data, action, method, success){
    incrRequestAmount()
    return yield executeRequest(url, data, action, method, success)
}

function* executeRequest(url, data, action, method, success) {
    let response = yield call(callServer, url, method, data)
    let responseSuccess = response && response.Succeeded
    return
}

export default function* serverSaga(){
    yield [
        fork(function*(){
            yield call (CREATE_REQUEST, handleRequest)
        }),
    ]
}
rootSaga.js (grouping sagas, in case I make more)
import { fork } from 'redux-saga/effects'
import serverSaga from './serverSaga'

export default function* rootSaga(){
    yield [
        fork(serverSaga)
    ]
}
requestReducer.js (its only function is to increment a variable after each request)
import { fromJS } from 'immutable'

export function incrRequestAmount() { return { type: 'INCR_REQUESTS' } }

const initialState = {
    requestAmount: 0
}

function requestReducer(state = fromJS(initialState), action){
    switch(action.type){
        case 'INCR_REQUESTS':
            return state.updateIn(["requestAmount"], (requests) => requests++)
        default:
            return state
    }
}

export const requestSelector = {
    amount: state => state.requests.get('requestAmount')
}

export default requestReducer
reducers.js (grouping reducers, in case I make more)
import { combineReducers } from 'redux'
import React, { Component } from 'react'
import requests from './requestReducer'

export default combineReducers({
    requests,
})
serverService.js (handles calls to the server (GET/POST))
import React, { Component } from 'react'

export function callServer(url, bodyData, method){
    let methodType = method.toLowerCase()
    return new Promise((resolve, reject) => {
        let r;
        switch (methodType){
            case 'post':
                r = {
                    method: 'POST',
                    headers: {
                        'Accept': 'application/json',
                        'Content-Type': 'application/json',
                    },
                    body: JSON.stringify(bodyData)
                }
                break;
            case 'get':
                r = {
                    method: 'GET'
                }
                break;
        }
        if (r) {
            console.log("URL: ", url)
            fetch(url, r)
                .then((response) => {
                    console.log("Resp: ", url, response)
                    return response.json()
                })
        }
    })
}
You need to extend React.Component to create a component, not React itself:
class App extends React {
should be
class App extends React.Component {
or, since you imported Component directly:
class App extends Component {

Chatbot using websockets in Angular 4 and Django

I am trying to create a real-time chat between a Django backend and an Angular 4 frontend, using a PostgreSQL database. Let's assume that I would like to create a chatbot, for instance something like A.L.I.C.E. I am not sure, but it seems to me that the most suitable solution would be to use WebSockets. I am trying to get data from the frontend, add it to the PostgreSQL database and then return a response. The content is not important at this moment; I would like to focus only on the connection. I am trying to do this in the way shown below, without positive results. Any suggestions? In the Safari console I get:
Django:
settings.py:
CHANNEL_LAYERS = {
    "default": {
        "BACKEND": "asgiref.inmemory.ChannelLayer",
        "ROUTING": "backend.routing.channel_routing",
    },
}
routing.py:
from channels.routing import route
from backend.consumers import ws_add, ws_message, ws_disconnect

channel_routing = [
    route("websocket.connect", ws_add),
    route("websocket.receive", ws_message),
    route("websocket.disconnect", ws_disconnect),
]
consumers.py:
# In consumers.py
from channels import Group

# Connected to websocket.connect
def ws_add(message):
    # Accept the connection
    message.reply_channel.send({"accept": True})
    # Add to the chat group
    Group("chat").add(message.reply_channel)

# Connected to websocket.receive
def ws_message(message):
    Group("chat").send({
        "text": "[user] %s" % message.content['text'],
    })

# Connected to websocket.disconnect
def ws_disconnect(message):
    Group("chat").discard(message.reply_channel)
Angular 4:
websocket.service.ts:
import { Injectable } from '@angular/core';
import * as Rx from 'rxjs/Rx';

@Injectable()
export class WebsocketService {
    constructor() { }

    private subject: Rx.Subject<MessageEvent>;

    public connect(url): Rx.Subject<MessageEvent> {
        if (!this.subject) {
            this.subject = this.create(url);
            console.log("Successfully connected: " + url);
        }
        return this.subject;
    }

    private create(url): Rx.Subject<MessageEvent> {
        let ws = new WebSocket(url);

        let observable = Rx.Observable.create(
            (obs: Rx.Observer<MessageEvent>) => {
                ws.onmessage = obs.next.bind(obs);
                ws.onerror = obs.error.bind(obs);
                ws.onclose = obs.complete.bind(obs);
                return ws.close.bind(ws);
            })

        let observer = {
            next: (data: Object) => {
                if (ws.readyState === WebSocket.OPEN) {
                    ws.send(JSON.stringify(data));
                }
            }
        }

        return Rx.Subject.create(observer, observable);
    }
}
chat.service.ts:
import { Injectable } from '@angular/core';
import { Observable, Subject } from 'rxjs/Rx';
import { WebsocketService } from './websocket.service';

const CHAT_URL = 'http://0.0.0.0:8000/';

export interface Message {
    author: string,
    message: string
}

@Injectable()
export class ChatService {
    public messages: Subject<Message>;

    constructor(wsService: WebsocketService) {
        this.messages = <Subject<Message>>wsService
            .connect(CHAT_URL)
            .map((response: MessageEvent): Message => {
                let data = JSON.parse(response.data);
                return {
                    author: data.author,
                    message: data.message
                }
            });
    }
}
My solution was to change const CHAT_URL = 'http://0.0.0.0:8000/'; to const CHAT_URL = 'ws://localhost:8000/';
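In other words, the only change needed in chat.service.ts was the URL scheme, since the browser WebSocket constructor expects a ws:// (or wss://) URL rather than http://:
// chat.service.ts
const CHAT_URL = 'ws://localhost:8000/';  // was 'http://0.0.0.0:8000/'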
