Commit

Add files via upload
aaqibsaeed authored Nov 4, 2016
1 parent 53568cc commit d685574
Showing 1 changed file with 266 additions and 0 deletions.
266 changes: 266 additions & 0 deletions Activity Detection.ipynb
@@ -0,0 +1,266 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 9,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"import pandas as pd\n",
"import numpy as np\n",
"import matplotlib.pyplot as plt\n",
"from scipy import stats\n",
"import tensorflow as tf\n",
"\n",
"%matplotlib inline\n",
"plt.style.use('ggplot')"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"def read_data(file_path):\n",
" column_names = ['user-id','activity','timestamp', 'x-axis', 'y-axis', 'z-axis']\n",
" data = pd.read_csv(file_path,header = None, names = column_names)\n",
" return data\n",
"\n",
"def feature_normalize(dataset):\n",
" mu = np.mean(dataset,axis = 0)\n",
" sigma = np.std(dataset,axis = 0)\n",
" return (dataset - mu)/sigma\n",
" \n",
"def plot_axis(ax, x, y, title):\n",
" ax.plot(x, y)\n",
" ax.set_title(title)\n",
" ax.xaxis.set_visible(False)\n",
" ax.set_ylim([min(y) - np.std(y), max(y) + np.std(y)])\n",
" ax.set_xlim([min(x), max(x)])\n",
" ax.grid(True)\n",
" \n",
"def plot_activity(activity,data):\n",
" fig, (ax0, ax1, ax2) = plt.subplots(nrows = 3, figsize = (15, 10), sharex = True)\n",
" plot_axis(ax0, data['timestamp'], data['x-axis'], 'x-axis')\n",
" plot_axis(ax1, data['timestamp'], data['y-axis'], 'y-axis')\n",
" plot_axis(ax2, data['timestamp'], data['z-axis'], 'z-axis')\n",
" plt.subplots_adjust(hspace=0.2)\n",
" fig.suptitle(activity)\n",
" plt.subplots_adjust(top=0.90)\n",
" plt.show()\n",
" \n",
"def windows(data, size):\n",
" start = 0\n",
" while start < data.count():\n",
" yield start, start + size\n",
" start += (size / 2)\n",
"\n",
"def segment_signal(data,window_size = 90):\n",
" segments = np.empty((0,window_size,3))\n",
" labels = np.empty((0))\n",
" for (start, end) in windows(data['timestamp'], window_size):\n",
" x = data[\"x-axis\"][start:end]\n",
" y = data[\"y-axis\"][start:end]\n",
" z = data[\"z-axis\"][start:end]\n",
" if(len(dataset['timestamp'][start:end]) == window_size):\n",
" segments = np.vstack([segments,np.dstack([x,y,z])])\n",
" labels = np.append(labels,stats.mode(data[\"activity\"][start:end])[0][0])\n",
" return segments, labels\n",
"\n",
"def weight_variable(shape):\n",
" initial = tf.truncated_normal(shape, stddev = 0.1)\n",
" return tf.Variable(initial)\n",
"\n",
"def bias_variable(shape):\n",
" initial = tf.constant(0.0, shape = shape)\n",
" return tf.Variable(initial)\n",
"\n",
"def depthwise_conv2d(x, W):\n",
" return tf.nn.depthwise_conv2d(x,W, [1, 1, 1, 1], padding='VALID')\n",
"\n",
"def apply_depthwise_conv(x,kernel_size,num_channels,depth):\n",
" weights = weight_variable([1, kernel_size, num_channels, depth])\n",
" biases = bias_variable([depth * num_channels])\n",
" return tf.nn.relu(tf.add(depthwise_conv2d(x, weights),biases))\n",
" \n",
"def apply_max_pool(x,kernel_size,stride_size):\n",
" return tf.nn.max_pool(x, ksize=[1, 1, kernel_size, 1], \n",
" strides=[1, 1, stride_size, 1], padding='VALID')"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"dataset = read_data('actitracker_raw.txt')\n",
"dataset['x-axis'] = feature_normalize(dataset['x-axis'])\n",
"dataset['y-axis'] = feature_normalize(dataset['y-axis'])\n",
"dataset['z-axis'] = feature_normalize(dataset['z-axis'])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"for activity in np.unique(dataset[\"activity\"]):\n",
" subset = dataset[dataset[\"activity\"] == activity][:180]\n",
" plot_activity(activity,subset)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"segments, labels = segment_signal(dataset)\n",
"labels = np.asarray(pd.get_dummies(labels), dtype = np.int8)\n",
"reshaped_segments = segments.reshape(len(segments), 1,90, 3)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"train_test_split = np.random.rand(len(reshaped_segments)) < 0.70\n",
"train_x = reshaped_segments[train_test_split]\n",
"train_y = labels[train_test_split]\n",
"test_x = reshaped_segments[~train_test_split]\n",
"test_y = labels[~train_test_split]"
]
},
{
"cell_type": "code",
"execution_count": 12,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"input_height = 1\n",
"input_width = 90\n",
"num_labels = 6\n",
"num_channels = 3\n",
"\n",
"batch_size = 10\n",
"kernel_size = 60\n",
"depth = 60\n",
"num_hidden = 1000\n",
"\n",
"learning_rate = 0.0001\n",
"training_epochs = 8\n",
"\n",
"total_batchs = reshaped_segments.shape[0] // batch_size"
]
},
{
"cell_type": "code",
"execution_count": 14,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"X = tf.placeholder(tf.float32, shape=[None,input_height,input_width,num_channels])\n",
"Y = tf.placeholder(tf.float32, shape=[None,num_labels])\n",
"\n",
"c = apply_depthwise_conv(X,kernel_size,num_channels,depth)\n",
"p = apply_max_pool(c,20,2)\n",
"c = apply_depthwise_conv(p,6,depth*num_channels,depth//10)\n",
"\n",
"shape = c.get_shape().as_list()\n",
"c_flat = tf.reshape(c, [-1, shape[1] * shape[2] * shape[3]])\n",
"\n",
"f_weights_l1 = weight_variable([shape[1] * shape[2] * depth * num_channels * (depth//10), num_hidden])\n",
"f_biases_l1 = bias_variable([num_hidden])\n",
"f = tf.nn.tanh(tf.add(tf.matmul(c_flat, f_weights_l1),f_biases_l1))\n",
"\n",
"out_weights = weight_variable([num_hidden, num_labels])\n",
"out_biases = bias_variable([num_labels])\n",
"y_ = tf.nn.softmax(tf.matmul(f, out_weights) + out_biases)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"loss = -tf.reduce_sum(Y * tf.log(y_))\n",
"optimizer = tf.train.GradientDescentOptimizer(learning_rate = learning_rate).minimize(loss)\n",
"\n",
"correct_prediction = tf.equal(tf.argmax(y_,1), tf.argmax(Y,1))\n",
"accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false,
"scrolled": true
},
"outputs": [],
"source": [
"cost_history = np.empty(shape=[1],dtype=float)\n",
"\n",
"with tf.Session() as session:\n",
" tf.initialize_all_variables().run()\n",
" for epoch in range(training_epochs):\n",
" for b in range(total_batchs): \n",
" offset = (b * batch_size) % (train_y.shape[0] - batch_size)\n",
" batch_x = train_x[offset:(offset + batch_size), :, :, :]\n",
" batch_y = train_y[offset:(offset + batch_size), :]\n",
" _, c = session.run([optimizer, loss],feed_dict={X: batch_x, Y : batch_y})\n",
" cost_history = np.append(cost_history,c)\n",
" print \"Epoch: \",epoch,\" Training Loss: \",c,\" Training Accuracy: \",\n",
" session.run(accuracy, feed_dict={X: train_x, Y: train_y})\n",
" \n",
" print \"Testing Accuracy:\", session.run(accuracy, feed_dict={X: test_x, Y: test_y})"
]
}
],
"metadata": {
"anaconda-cloud": {},
"kernelspec": {
"display_name": "Python [conda root]",
"language": "python",
"name": "conda-root-py"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 2
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython2",
"version": "2.7.12"
}
},
"nbformat": 4,
"nbformat_minor": 1
}
