Skip to content

Commit

Permalink
Implement README and predictor
Browse files Browse the repository at this point in the history
  • Loading branch information
klawr committed May 23, 2021
1 parent 1a94c3c commit 9aa34e5
Show file tree
Hide file tree
Showing 3 changed files with 118 additions and 74 deletions.
6 changes: 6 additions & 0 deletions src/react-native/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
## deepmech react native

This project is on hold because the predictor is not yet working well enough.
The deepmech model is imported and some inference is made, but the preprocessing appears to be off, and the overlay is misaligned with the camera stream.

Hopefully I will have time to come back to this at some point.
97 changes: 23 additions & 74 deletions src/react-native/src/Components/Deepmech/Camera.tsx
Original file line number Diff line number Diff line change
@@ -1,16 +1,11 @@

import React from 'react';
import { Dimensions, Platform, StyleSheet, Text, View } from 'react-native';
import { Button, Dimensions, Platform, StyleSheet, Text, View } from 'react-native';
import Header from '../Header';
import { Camera } from 'expo-camera';
import * as tf from '@tensorflow/tfjs';
import { bundleResourceIO, cameraWithTensors } from '@tensorflow/tfjs-react-native';
import { g2 } from 'g2-module';
import G2SVG from '../G2/G2SVG';
import { mecModelSelectModel } from '../../Redux/MecModelSlice';
import { useSelector } from 'react-redux';
import { IMecModel, mec } from 'mec2-module';
import Svg, { Rect } from 'react-native-svg';
import Predictor from './Predictor';

function Wrap({ navigation, children } = {} as any) {
return <View style={styles.container}>
Expand All @@ -21,6 +16,14 @@ function Wrap({ navigation, children } = {} as any) {
const TensorCamera = cameraWithTensors(Camera);

export default function ACamera({ navigation } = {} as any) {
const [granted, setGranted] = React.useState(false);
const [image, setImage] = React.useState(null);
const [ready, setReady] = React.useState(false);
const modelRef: React.MutableRefObject<tf.LayersModel> = React.useRef(null) as any;
const imageRef: React.MutableRefObject<tf.Tensor3D> = React.useRef(null) as any;

const modelJson = require('../../../assets/models/symbol_detector.json');
const modelWeights = require('../../../assets/models/symbol_detector.bin');

const width = Dimensions.get("window").width;
const height = Dimensions.get("window").height;
Expand All @@ -35,77 +38,24 @@ export default function ACamera({ navigation } = {} as any) {
autorender: true,
}

const model: React.MutableRefObject<tf.LayersModel> = React.useRef(null) as any;
const [mecModel, setMecModel] = React.useState({} as IMecModel);

const [text, setText] = React.useState("");

mec.model.extend(mecModel);
mecModel.init();
const y = Platform.OS === 'android' ? - height * 0.96 : 100;
const g = g2().view({ y, cartesian: true });
mecModel.draw(g);
// Should later be used to be able to cancel animationframe.
// If this were a class it would be:
/**
componentWillUnmount() {
if(this.rafID) {
cancelAnimationFrame(this.rafID);
}
async function onReady(images: IterableIterator<tf.Tensor3D>) {
const loop = () => {
imageRef.current = images.next().value;
requestAnimationFrame(loop);
};
loop();
}
*/
// So it should be implemented using React.useEffect or sth...
let rafId: number;

const modelJson = require('../../../assets/models/symbol_detector.json');
const modelWeights = require('../../../assets/models/symbol_detector.bin');

tf.ready().then(() => {
tf.loadLayersModel(bundleResourceIO(modelJson, modelWeights))
.then(r => model.current = r);
.then(r => modelRef.current = r);
});

async function onReady(images: IterableIterator<tf.Tensor3D>) {
const rgb = tf.tensor1d([0.2989, 0.587, 0.114]);
const loop = async () => {
if (model.current != null) {

let image = images.next().value;

if (image) {
const imageTensor = tf
.sum(image.mul(rgb), 2)
.div(255)
.round()
.expandDims(-1)
.expandDims(0)

const pred = model.current.predict(imageTensor) as tf.Tensor<tf.Rank>;
tf.dispose([imageTensor]);
if (pred) {
setText(tf.argMax(tf.squeeze(pred), -1).greater(0).toString());
// tf.whereAsync(tf.argMax(tf.squeeze(pred), -1).greater(0)).then(a => {
// setMecModel({
// nodes: a.arraySync().map((e: any) => ({
// x: e[1] * image.shape[1] / pred.shape[2]!,
// y: e[0] * image.shape[0] / pred.shape[1]!
// }))
// } as any);
// rafId = requestAnimationFrame(loop);
// });
requestAnimationFrame(loop);
}
}
tf.dispose([image]);
}

};

loop();
function onPress() {
if (imageRef.current) {
setImage(imageRef.current as any);
}
}

const [granted, setGranted] = React.useState(false);

if (Platform.OS === 'android') {
const { check, PERMISSIONS, request, RESULTS } = require('react-native-permissions');
function androidRequest(result: string | typeof RESULTS) {
Expand All @@ -130,10 +80,9 @@ export default function ACamera({ navigation } = {} as any) {
return <Wrap navigation={navigation}>
{granted ?
<View style={styles.container}>
<Predictor image={image} model={modelRef} />
<Button onPress={onPress} title="Test" />
<TensorCamera style={{ ...styles.container, zIndex: 1 }} type={Camera.Constants.Type.back} {...tensorCameraProps} />
<Text style={{ ...styles.container, position: "absolute", backgroundColor: "transparent", zIndex: 20 }}>{text}</Text>
<Svg style={{ ...styles.container, position: "absolute", backgroundColor: "transparent", zIndex: 20 }}><Rect x={60} y={0} width={32} height={32} /></Svg>
<G2SVG style={{ ...styles.container, position: "absolute", backgroundColor: "transparent", zIndex: 20 }} cq={g} />
</View> :
<View style={styles.warning}><Text>No permission to use camera.</Text></View>
}
Expand Down
89 changes: 89 additions & 0 deletions src/react-native/src/Components/Deepmech/Predictor.tsx
Original file line number Diff line number Diff line change
@@ -0,0 +1,89 @@
import React from 'react';
import { IMecModel, mec } from 'mec2-module';
import { Dimensions, Platform, StyleSheet, Text, View } from 'react-native';
import G2SVG from '../G2/G2SVG';
import Svg, { Rect } from 'react-native-svg';
import { g2 } from 'g2-module';
import * as tf from '@tensorflow/tfjs';
import { ScrollView } from 'react-native-gesture-handler';

/**
 * Overlay component that runs the deepmech symbol detector on a captured
 * camera frame and draws the detected nodes on top of the camera view.
 *
 * @param image tf.Tensor3D camera frame (assumed RGB, [h, w, 3] — TODO confirm
 *              against the TensorCamera output shape in Camera.tsx).
 * @param model React ref holding the loaded tf.LayersModel (may be null until
 *              loading finishes; nothing is predicted before then).
 */
export default function App({ image, model } = {} as any) {
    const height = Dimensions.get("window").height;
    // Vertical view offset; -96% of screen height on Android, fixed 100
    // elsewhere. NOTE(review): empirically chosen — the README says the
    // camera-stream alignment still looks off.
    const y = Platform.OS === 'android' ? - height * 0.96 : 100;
    // g2 command queue kept in a ref so accumulated drawing survives re-renders.
    const g = React.useRef(g2().view({ y, cartesian: true }).rec({ x: 0, y: 19, b: 32, h: 32 }));

    // Run inference as an effect rather than during render: the original
    // render-time side effects re-ran the model on every render and leaked
    // every tensor it created (dispose was commented out).
    React.useEffect(() => {
        if (!(image && model && model.current)) {
            // Still release a frame we cannot process (tf.dispose tolerates null).
            tf.dispose([image]);
            return;
        }

        // Grayscale + binarize: 1 - mean(RGB)/255 rounded to {0, 1}, then
        // shaped to [1, h, w, 1] for the conv model. tidy() frees every
        // intermediate except the returned tensor.
        const imageTensor = tf.tidy(() => tf.tensor(1)
            .sub(tf.sum(image, -1).div(255 * 3))
            .round()
            .expandDims(-1)
            .expandDims(0));
        const pred = model.current.predict(imageTensor) as tf.Tensor<tf.Rank>;
        // Boolean mask of grid cells whose argmax class is non-background.
        const hits = tf.tidy(() => tf.argMax(tf.squeeze(pred), -1).greater(0));

        tf.whereAsync(hits).then(a => {
            // Map detection-grid coordinates back to image pixel space.
            const mecModel = {
                nodes: a.arraySync().map((e: any) => ({
                    x: e[1] * image.shape[1] / pred.shape[2]!,
                    y: e[0] * image.shape[0] / pred.shape[1]!,
                })),
            } as any;

            g.current.txt({ x: 50, y: 50, str: "hi" });

            mec.model.extend(mecModel);
            mecModel.init();
            mecModel.draw(g.current);

            // Release everything created for this frame, including the frame.
            tf.dispose([a, hits, pred, imageTensor, image]);
        });
    }, [image, model]);

    return <View style={{
        ...styles.container,
        position: "absolute",
        zIndex: 20,
    }}>
        <G2SVG
            style={{
                ...styles.container,
                position: "absolute",
                backgroundColor: "#f00a",
                zIndex: 20,
                top: 20,
            }}
            cq={g.current} />
    </View>
}

// Shared layout styles for the predictor overlay; `preview`/`capture` are
// kept for the camera screens, `warning` for the no-permission fallback.
const styles = StyleSheet.create({
    // Full-screen column container with a neutral gray backdrop.
    container: {
        backgroundColor: '#aaa',
        flex: 1,
        flexDirection: 'column',
    },
    // Centered notice area (e.g. permission warnings).
    warning: {
        alignContent: 'center',
        alignItems: 'center',
        flex: 1,
    },
    // Bottom-anchored, horizontally centered camera preview region.
    preview: {
        alignItems: 'center',
        flex: 1,
        justifyContent: 'flex-end',
    },
    // Rounded white capture button, self-centered with outer margin.
    capture: {
        alignSelf: 'center',
        backgroundColor: '#fff',
        borderRadius: 5,
        flex: 0,
        margin: 20,
        padding: 15,
        paddingHorizontal: 20,
    },
});

0 comments on commit 9aa34e5

Please sign in to comment.