Standard Deviation??

Hey guys, sorry for the long code but I need some help. I got everything right so far up to the average but I keep getting the answer 0 for the standard deviation. I have no idea what's wrong.

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
#include <cstdlib>
#include <iostream>
#include <cmath>
using namespace std;
int fillArray(int*,int);
double computeAverage(int*,int);
double displayAvg(double,int);
double computerStandardDeviation(int*,int, double);
 
 
// Reads n integer scores from standard input into the caller-supplied array.
// Returns the requested count n, unchanged.
int fillArray(int *scores, int n)
{
    cout << "Enter scores" << endl;
    for (int idx = 0; idx < n; ++idx)
    {
        cin >> scores[idx];
    }
    return n;
}


// Computes the arithmetic mean of the first n entries of scores.
double computeAverage(int *scores, int n)
{
    int total = 0;
    for (int idx = 0; idx < n; ++idx)
    {
        total += scores[idx];
    }
    // Cast the sum to double before dividing so the fraction survives.
    return static_cast<double>(total) / n;
}


// Prints the student count and their average score to standard output.
void displayArray(double average, int n)
{
    cout << "For " << n << " students" << endl
         << "Average = " << average << endl;
}


double computerStandardDeviation(int *scores,int n, double average)
{
double total=0;
double sdev, sdevx;
for(int i=1;i<=n;i++)
{
total+=pow((scores[i]-average),2);
}
sdevx = (1/n)*total;
sdev=(double)sqrt(sdevx);

return sdev;
}
 
// Drives the program: asks how many scores to read, fills the array,
// then reports the average and population standard deviation.
int main()
{
    int n;

    cout << "How many scores would you like to type?" << endl;
    cin >> n;

    // Validate before allocating: a failed read or non-positive n would
    // make 'new int[n]' undefined behavior in the original.
    if (!cin || n <= 0)
    {
        cout << "Invalid count." << endl;
        return 1;
    }

    int *scores = new int[n];

    fillArray(scores, n);
    double average = computeAverage(scores, n);
    displayArray(average, n);
    double sdev = computerStandardDeviation(scores, n, average);
    cout << "Standard Deviation= " << sdev << endl;

    delete[] scores;    // the original leaked this allocation

    system("pause");    // Windows-only pause; declared in <cstdlib>

    return 0;
}





How many scores would you like to type?
5
Enter scores
78
86
64
98
75
For 5 students
Average = 80.2
Standard Deviation= 0
Press any key to continue . . .
Last edited on
try to place (double) before scores[i] in stdDevation function:
1
2
3
4
5
6
7
8
double total=0;
double sdev, sdevx;
for(int i=1;i<=n;i++)
{
total+=pow(((double)scores[i])-average,2);  // <--- Type-cast to 'double' of scores[i];
}
sdevx = (1.0 /n)*total;  // <---- replace 1 with 1.0
sdev=(double)sqrt(sdevx);

This is because:
- In 'scores[i] - average' the int element is already promoted to double automatically (since 'average' is a double), so the cast there is mostly for clarity. The real culprit is '(1/n)': both operands are ints, so the compiler performs integer division, which yields 0 for any n > 1 — and multiplying the total by 0 makes the whole standard deviation 0. Writing '1.0/n' (or 'total/n') forces floating-point division. Note also that the loop should run from i = 0 to i < n; starting at 1 skips the first score and reads one element past the end of the array.
Last edited on
Topic archived. No new replies allowed.