@article{article_35179,
  author = {Roudmajani, Mohsen and Ghonsoli, Behzad and Ghabol, Ehsan and Yahaghi, Mohammad Jafar},
  title = {An Analysis of the Reading Section in Ferdowsi Persian Proficiency Test for Non-native Speakers},
  journal = {New Literary Studies},
  volume = {51},
  number = {2},
  pages = {1-26},
  year = {2018},
  publisher = {Ferdowsi University of Mashhad},
  issn = {2783-252X},
  eissn = {2783-252X},
  doi = {10.22067/jls.v0i0.71976},
  abstract = {Introduction
Humans are not born with the ability to read; reading is a recent phenomenon in human history, and some people still cannot read. Today, however, weakness in reading ability can cause serious problems. Reading and academic life are inseparable, and many student activities are interwoven with reading: books, essays, instructions, and research reports are just a few examples of academic materials that require reading ability. Because of this importance, most Persian proficiency tests include a reading section. Ferdowsi University's Persian proficiency test, which is designed to assess the language proficiency of non-Persian-speaking students, includes a reading component. It is the official test of Ferdowsi University and is held twice a year in Iran and in some cities of Iraq. In this research we study the reading section of this test and try to answer two questions: (1) What is the reliability of the reading section of this test? (2) What is the relation between the test items and the test takers' abilities? Finally, we offer recommendations for improving the test's reliability. To do so, we define the construct of reading skill and use a language-task framework, a powerful tool for analyzing reading activities in an academic environment.
Review of Literature
The history of language testing can be divided into three periods: the pre-scientific period, the psychometric-structuralist period, and the period of the psychology and sociology of language (Spolsky, 1977, cited in Davis, 2008). The first period goes back more than two thousand years, to when empires used tests to select new members. The second period began when psychometricians devised new statistical tools for measuring test reliability; it coincided with the structuralist movement in linguistics and led to the discrete-point approach to measurement. In the late twentieth century, however, another approach emerged, known as integrative assessment, which emphasized assessing language in context and, unlike the structuralist approach, did not regard dividing language into smaller, discrete points as an efficient way of testing. In the second half of the twentieth century, with the development of scientific tools, companies emerged that produced tests on a mass scale, thereby turning testing into an industry (Spolsky, 2008). One of these companies is ETS. In 1964, on the basis of the discrete-point assessment approach, the company designed its first proficiency test and gradually made changes to its structure. In a project called TOEFL 2000, however, ETS decided to make major changes to this proficiency test. Reading-skill assessment was also part of the TOEFL 2000 project, which resulted in Assessing Second Language Academic Reading from a Communicative Perspective: Relevance for TOEFL 2000 (Hudson, 1996) and the TOEFL 2000 reading framework (Enright et al., 2000).
Attempts have also been made to design a Persian proficiency test (Mousavi, 1999; Ghonsooli, 2010; Mamghmani, 2010; Jalili, 2011; Golpour, 2015), and these studies devote part of their tests to reading skill. What distinguishes the present study from previous research, however, is its precise definition of the construct of reading skill, the model it provides for measuring this skill, and its task-based view of test design.
Method
First, in order to understand what reading ability is, we defined its construct. Based on recent models, reading consists of lower-level and higher-level processes. The lower-level processes include word recognition, syntactic parsing, and the encoding of meaning propositions, and they are largely automatic. The higher-level processes consist of the text model, the situation model, and the executive control mechanism. To understand a text, the reader uses higher-level and lower-level processes interactively. A framework was also introduced for analyzing language tasks in an academic environment; it helps us understand and simulate such tasks. It has three parts: situation, text content, and test features. The framework is based on communicative perspectives on language learning and assessment. The situation describes the context in which the task is performed: its participants, purpose, content, setting, and register. The text content covers the grammatical, functional, and discourse features of the text, and the test-features section describes factors such as the type of questions, the type of response, and the scoring procedures.
The data in this research were gathered from a test held at the Ferdowsi International Center for Teaching Persian to Non-native Speakers. The test is intended to determine to what extent test takers are able to cope with academic activities. The cut score is 60 out of 100. The test has five sections: reading, listening, writing, speaking, and structure. Ninety test takers took part: 33 were female and 57 were male. Most of the participants were Iraqi (63), and the rest were from Indonesia, Lebanon, Syria, Australia, South Korea, Pakistan, and Yemen. Thirty-eight of the examinees were studying in different fields of engineering, 17 were studying humanities, 13 sciences, and 7 medical sciences; the field of study of the remaining examinees was unknown. The Persian proficiency test was held on Mordad 25th, 1396 at 8 o'clock and took about three and a half hours. As the reading section was the second part of the test, it started at half past eight; it took 60 minutes, during which the students answered 25 questions. The students sat in 10 different rooms, with 9 students in each room. After the test, the papers were marked and the data were prepared for Rasch analysis, which was carried out with the WINSTEPS software.
Results and Discussion
The analysis showed that all items except item 21 have acceptable infit and outfit values, which indicates that the test is unidimensional and can be analyzed with the Rasch model. The outfit for item 21 is 1.53, which exceeds the acceptable range; this multiple-choice item is also very easy, and it appears to be ambiguous. The results also showed that the reliability of the test is 0.76. The reliability of a reading test should be above 0.90, so the test's reliability needs to be improved.
The Wright map (item-person map) also showed that the test needs more difficult items: only two items match the ability level of the top 14 examinees, and only four items are appropriate for the top 36 examinees. The exam therefore cannot differentiate between intermediate and advanced students.
Conclusion
The results showed that the test's reliability is not high, so the test designers should improve it, for example by adding more items. The item-examinee map also showed that the test mainly assesses the reading ability of elementary and intermediate students, which means it is not appropriate for assessing advanced students' reading skill. The test includes three texts and 25 items; compared with the TOEFL, which has about 40 items, it needs more questions, and adding items would also improve its reliability. In terms of the type of information requested, however, the items cover different levels, which is a strength of the test. Each text also has about 500 words, which is reasonably appropriate for assessing academic reading skill. The results showed that the test is easy, mainly because of the matching type of its questions: for fourteen items the students only have to locate the answer in the text and circle it, and such items are usually not very difficult. To increase the test's difficulty, other matching types, such as integrative and productive questions, should be added. From the perspective of reading purposes, the test items mainly serve two purposes, reading to find information and reading for basic comprehension; the test needs items addressing other purposes, such as reading to learn and reading to integrate information.},
  keywords = {Reading skill, Persian proficiency test, task-based assessment, proficiency-based assessment, test reliability},
  title_fa = {بررسی بخش خواندن در آزمون بسندگیِ زبان فارسی دانشگاه فردوسی مشهد},
  abstract_fa = {مهارتِ خواندن و درک مطلب و محیط دانشگاه پیوندی ناگسستنی با یکدیگر دارند و ضعف و ناتوانی در این مهارت می‌تواند به زندگی دانشگاهی دانشجویان لطمه‌ای جدّی بزند. از همین رو سنجش این توانمندی بخش بسیار مهمّی از آزمون‌های بسندگی زبان فارسی برای غیر فارسی‌زبانان را به خود اختصاص می‌دهد؛ ازجمله آزمون زبان فارسی دانشگاه فردوسی مشهد که بیش از چهار سال است به‌عنوان معیاری برای سنجش دانشجویان غیرفارسی‌زبان برای ورود به دانشگاه‌های ایران، در برخی شهرهای ایران و عراق برگزار می‌شود. در این پژوهش با کمک مدل آماری رش، به بررسی داده‌های گردآوری‌شدۀ بخشِ خواندن یکی از آزمون‌های برگزارشده در مرکز بین‌المللی آموزش زبان فارسی دانشگاه فردوسی مشهد پرداختیم. بررسی‌ها حاکی از این بود که پایایی بخشِ خواندن آزمون ۰/۷۶ است که برای یک آزمون سرنوشت‌ساز رقم بالایی به‌شمار نمی‌آید. همچنین نقشۀ آزمودنی-پرسش، بیانگر این نکته بود که سؤالات آزمون عمدتاً در سطح داوطلبانِ متوسّط هستند. اما نقطۀ قوت این آزمون را می‌توان تعداد نسبتاً زیاد پرسش‌ها و نیز تنوع آنها از نظر نوع اطلاعات خواسته‌شده دانست. مقایسۀ آزمون با مبانی نظری ارائه‌شده توسّط طرّاحان تافل این امکان را فراهم کرد تا راهکارهایی برای بهبود پایایی آزمون ارائه بدهیم. سؤالات آزمون بسندگی فارسی دانشگاه فردوسی مشهد از نظر نوع مطابقت، عمدتاً از نوع محل‌یابی و یا سلسله‌ای هستند و پرسش‌هایی از نوع ترکیبی و تولیدی می‌تواند به سخت‌تر شدن سؤالات بینجامد. اگر از منظر اهداف خواندن به آزمون نگاهی بیندازیم، مشاهده می‌کنیم که سؤالات عمدتاً متناسب با اهداف خواندن برای پیداکردن اطّلاعات و نیز خواندن برای رسیدن به درک مقدّماتی هستند.
به نظر می‌رسد که این آزمون به سؤالاتی متناسب با اهداف خواندن برای یادگیری و نیز هدف خواندن برای ادغام اطّلاعات در بین چند متن مختلف نیاز دارد که از سطح دشواری بالاتری برخوردار هستند.},
  keywords_fa = {آزمون بسندگی زبان فارسی, سنجش تکلیف‌محور, سنجش مهارت‌محور, مهارت خواندن, پایایی آزمون},
  url = {https://jls.um.ac.ir/article_35179.html},
  eprint = {https://jls.um.ac.ir/article_35179_065f04c806104de0b504dcf40dd2b2ad.pdf}
}